Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/amt.c4
-rw-r--r--drivers/net/bareudp.c50
-rw-r--r--drivers/net/bonding/bond_main.c292
-rw-r--r--drivers/net/bonding/bond_options.c2
-rw-r--r--drivers/net/can/Kconfig1
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/bxcan.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c2
-rw-r--r--drivers/net/can/cc770/cc770_isa.c2
-rw-r--r--drivers/net/can/cc770/cc770_platform.c34
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_platform.c2
-rw-r--r--drivers/net/can/dev/dev.c3
-rw-r--r--drivers/net/can/dev/netlink.c102
-rw-r--r--drivers/net/can/esd/esd_402_pci-core.c5
-rw-r--r--drivers/net/can/esd/esdacc.c55
-rw-r--r--drivers/net/can/esd/esdacc.h38
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c52
-rw-r--r--drivers/net/can/flexcan/flexcan.h2
-rw-r--r--drivers/net/can/grcan.c2
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c2
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/kvaser_pciefd.c47
-rw-r--r--drivers/net/can/m_can/m_can.c133
-rw-r--r--drivers/net/can/m_can/m_can_platform.c2
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c2
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c3
-rw-r--r--drivers/net/can/rcar/rcar_can.c2
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c2
-rw-r--r--drivers/net/can/rockchip/Kconfig9
-rw-r--r--drivers/net/can/rockchip/Makefile10
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-core.c967
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-ethtool.c73
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-rx.c299
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-timestamp.c105
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-tx.c167
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd.h553
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c2
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c2
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/can/spi/mcp251x.c2
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c2
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c11
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c2
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c36
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c2
-rw-r--r--drivers/net/can/sun4i_can.c2
-rw-r--r--drivers/net/can/ti_hecc.c2
-rw-r--r--drivers/net/can/usb/Kconfig3
-rw-r--r--drivers/net/can/usb/esd_usb.c6
-rw-r--r--drivers/net/can/usb/etas_es58x/es581_4.c2
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.c2
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.c2
-rw-r--r--drivers/net/can/usb/f81604.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h26
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c63
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c41
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c114
-rw-r--r--drivers/net/can/usb/mcba_usb.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c3
-rw-r--r--drivers/net/can/xilinx_can.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c17
-rw-r--r--drivers/net/dsa/b53/b53_mdio.c7
-rw-r--r--drivers/net/dsa/b53/b53_spi.c2
-rw-r--r--drivers/net/dsa/lan9303-core.c29
-rw-r--r--drivers/net/dsa/microchip/Kconfig9
-rw-r--r--drivers/net/dsa/microchip/Makefile2
-rw-r--r--drivers/net/dsa/microchip/ksz8.c (renamed from drivers/net/dsa/microchip/ksz8795.c)123
-rw-r--r--drivers/net/dsa/microchip/ksz8.h3
-rw-r--r--drivers/net/dsa/microchip/ksz8863_smi.c4
-rw-r--r--drivers/net/dsa/microchip/ksz8_reg.h (renamed from drivers/net/dsa/microchip/ksz8795_reg.h)15
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c287
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h5
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h12
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c450
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h60
-rw-r--r--drivers/net/dsa/microchip/ksz_dcb.c2
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c5
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c23
-rw-r--r--drivers/net/dsa/mt7530-mmio.c1
-rw-r--r--drivers/net/dsa/mt7530.c49
-rw-r--r--drivers/net/dsa/mt7530.h20
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c3
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2_scratch.c2
-rw-r--r--drivers/net/dsa/ocelot/felix.c131
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c13
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c2
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c10
-rw-r--r--drivers/net/dsa/realtek/rtl83xx.c8
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c11
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c545
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx.h2
-rw-r--r--drivers/net/dummy.c3
-rw-r--r--drivers/net/ethernet/Kconfig11
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adi/adin1110.c8
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c3
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c21
-rw-r--r--drivers/net/ethernet/alteon/acenic.c26
-rw-r--r--drivers/net/ethernet/alteon/acenic.h8
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h72
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c173
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h68
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c163
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c27
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h2
-rw-r--r--drivers/net/ethernet/amd/mvme147.c7
-rw-r--r--drivers/net/ethernet/amd/pds_core/debugfs.c8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c30
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c16
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c16
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h10
-rw-r--r--drivers/net/ethernet/apple/bmac.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c25
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c4
-rw-r--r--drivers/net/ethernet/atheros/Kconfig4
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c179
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.c5
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c2
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c1
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c383
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h22
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c98
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c37
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h389
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c29
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c5
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c19
-rw-r--r--drivers/net/ethernet/broadcom/cnic.h2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c6
-rw-r--r--drivers/net/ethernet/cadence/macb.h3
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c39
-rw-r--r--drivers/net/ethernet/cadence/macb_pci.c5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_device.h1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c16
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_iq.h3
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c30
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/common.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/tp.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h1
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c8
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c65
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h38
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c106
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c157
-rw-r--r--drivers/net/ethernet/davicom/dm9051.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/eeprom.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip.h2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c2
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h3
-rw-r--r--drivers/net/ethernet/engleder/tsnep_ethtool.c4
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c59
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.h2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c15
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c3
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c7
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c81
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h10
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c20
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c2
-rw-r--r--drivers/net/ethernet/freescale/fec.h9
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c31
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c108
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c6
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c444
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h27
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c17
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c15
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-scc.c29
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c5
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c5
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c10
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_dev.c17
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ethtool.c5
-rw-r--r--drivers/net/ethernet/google/gve/gve.h6
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c182
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h59
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c44
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c109
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c33
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c10
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c221
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h10
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c4
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c172
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c185
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c23
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c40
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c25
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h30
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c59
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_fdir.c89
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_fdir.h13
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c160
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c25
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile4
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c46
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.h1
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink_port.c510
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink_port.h46
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c37
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c178
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c68
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.h17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c229
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c111
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch_br.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch_br.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c99
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c109
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c230
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c157
-rw-r--r--drivers/net/ethernet/intel/ice/ice_osdep.h28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_parser.c2430
-rw-r--r--drivers/net/ethernet/intel/ice/ice_parser.h540
-rw-r--r--drivers/net/ethernet/intel/ice/ice_parser_rt.c861
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c211
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_eth.c329
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_eth.h33
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib_private.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c403
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c57
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c20
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c2
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c23
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c110
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c395
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h92
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_vf_dev.c3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c11
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c32
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h1
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h11
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h28
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c81
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c108
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h12
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c143
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c4
-rw-r--r--drivers/net/ethernet/jme.c10
-rw-r--r--drivers/net/ethernet/lantiq_etop.c5
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c5
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c18
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c116
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h33
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c147
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c59
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c3
-rw-r--r--drivers/net/ethernet/mediatek/airoha_eth.c538
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h14
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c10
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c9
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_mcu.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c315
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c95
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h926
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c2604
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h307
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c149
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c995
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c1300
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h361
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c260
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c480
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c2146
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h834
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c1216
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h107
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c579
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c640
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h151
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h514
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c780
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h84
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c1231
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h270
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c493
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c158
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h20
-rw-r--r--drivers/net/ethernet/meta/Kconfig2
-rw-r--r--drivers/net/ethernet/meta/fbnic/Makefile2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h7
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h37
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c77
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c75
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c13
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h6
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c27
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h40
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c50
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.h3
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c138
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h4
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c59
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h10
-rw-r--r--drivers/net/ethernet/microchip/Kconfig7
-rw-r--r--drivers/net/ethernet/microchip/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/fdma/Kconfig18
-rw-r--r--drivers/net/ethernet/microchip/fdma/Makefile7
-rw-r--r--drivers/net/ethernet/microchip/fdma/fdma_api.c146
-rw-r--r--drivers/net/ethernet/microchip/fdma/fdma_api.h243
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c127
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c646
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c35
-rw-r--r--drivers/net/ethernet/microchip/lan865x/Kconfig19
-rw-r--r--drivers/net/ethernet/microchip/lan865x/Makefile6
-rw-r--r--drivers/net/ethernet/microchip/lan865x/lan865x.c429
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c11
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c409
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h58
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c11
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c382
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h31
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c12
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c6
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c14
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c6
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c62
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c81
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c96
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c279
-rw-r--r--drivers/net/ethernet/mscc/ocelot_fdma.c3
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c12
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c2
-rw-r--r--drivers/net/ethernet/oa_tc6.c1361
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c2
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c2
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c5
-rw-r--r--drivers/net/ethernet/pensando/Kconfig1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_debugfs.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h25
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c161
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c420
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c2
-rw-r--r--drivers/net/ethernet/realtek/Kconfig19
-rw-r--r--drivers/net/ethernet/realtek/Makefile1
-rw-r--r--drivers/net/ethernet/realtek/r8169.h1
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c76
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c3
-rw-r--r--drivers/net/ethernet/realtek/rtase/Makefile10
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h340
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c2288
-rw-r--r--drivers/net/ethernet/renesas/ravb.h1
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c43
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c2
-rw-r--r--drivers/net/ethernet/renesas/rtsn.c3
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c3
-rw-r--r--drivers/net/ethernet/seeq/ether3.c2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c127
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.c4
-rw-r--r--drivers/net/ethernet/sfc/efx.c4
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c3
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c7
-rw-r--r--drivers/net/ethernet/sfc/nic.h2
-rw-r--r--drivers/net/ethernet/sfc/nic_common.h1
-rw-r--r--drivers/net/ethernet/sfc/ptp.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c3
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.c7
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.c125
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h26
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.c56
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.h4
-rw-r--r--drivers/net/ethernet/sfc/tc_counters.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c597
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c164
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c96
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c78
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c108
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c292
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c154
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c34
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c4
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.h2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c77
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c474
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h39
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c297
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h63
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c7
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c3
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c72
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.h73
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_classifier.c1
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c18
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c24
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.h2
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_ethtool.c30
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c202
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h20
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c9
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c36
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.h158
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c7
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c3
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c20
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig3
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c5
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h6
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c8
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c3
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h140
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c459
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c4
-rw-r--r--drivers/net/geneve.c2
-rw-r--r--drivers/net/gtp.c7
-rw-r--r--drivers/net/hamradio/6pack.c60
-rw-r--r--drivers/net/hamradio/bpqether.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c3
-rw-r--r--drivers/net/ieee802154/Kconfig1
-rw-r--r--drivers/net/ieee802154/cc2520.c2
-rw-r--r--drivers/net/ieee802154/mcr20a.c5
-rw-r--r--drivers/net/ipa/ipa_power.c7
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c4
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c3
-rw-r--r--drivers/net/loopback.c4
-rw-r--r--drivers/net/macsec.c22
-rw-r--r--drivers/net/macvlan.c6
-rw-r--r--drivers/net/mctp/Kconfig5
-rw-r--r--drivers/net/mctp/mctp-i3c.c2
-rw-r--r--drivers/net/mctp/mctp-serial.c136
-rw-r--r--drivers/net/mdio/fwnode_mdio.c3
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c1
-rw-r--r--drivers/net/mdio/mdio-mux-mmioreg.c54
-rw-r--r--drivers/net/mdio/of_mdio.c5
-rw-r--r--drivers/net/net_failover.c4
-rw-r--r--drivers/net/netconsole.c200
-rw-r--r--drivers/net/netdevsim/dev.c15
-rw-r--r--drivers/net/netdevsim/fib.c1
-rw-r--r--drivers/net/netkit.c7
-rw-r--r--drivers/net/nlmon.c4
-rw-r--r--drivers/net/pcs/pcs-xpcs-wx.c2
-rw-r--r--drivers/net/phy/Kconfig11
-rw-r--r--drivers/net/phy/Makefile4
-rw-r--r--drivers/net/phy/air_en8811h.c2
-rw-r--r--drivers/net/phy/aquantia/aquantia_firmware.c44
-rw-r--r--drivers/net/phy/aquantia/aquantia_leds.c3
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c75
-rw-r--r--drivers/net/phy/ax88796b_rust.rs7
-rw-r--r--drivers/net/phy/bcm-phy-ptp.c2
-rw-r--r--drivers/net/phy/bcm84881.c4
-rw-r--r--drivers/net/phy/dp83822.c35
-rw-r--r--drivers/net/phy/dp83869.c1
-rw-r--r--drivers/net/phy/dp83td510.c119
-rw-r--r--drivers/net/phy/dp83tg720.c154
-rw-r--r--drivers/net/phy/marvell-88x2222.c2
-rw-r--r--drivers/net/phy/marvell.c2
-rw-r--r--drivers/net/phy/marvell10g.c2
-rw-r--r--drivers/net/phy/microchip_t1.c990
-rw-r--r--drivers/net/phy/microchip_t1s.c30
-rw-r--r--drivers/net/phy/motorcomm.c684
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c2
-rw-r--r--drivers/net/phy/open_alliance_helpers.c77
-rw-r--r--drivers/net/phy/open_alliance_helpers.h47
-rw-r--r--drivers/net/phy/phy.c22
-rw-r--r--drivers/net/phy/phy_device.c113
-rw-r--r--drivers/net/phy/phy_link_topology.c105
-rw-r--r--drivers/net/phy/phylink.c45
-rw-r--r--drivers/net/phy/qcom/at803x.c2
-rw-r--r--drivers/net/phy/qcom/qca807x.c12
-rw-r--r--drivers/net/phy/qcom/qca83xx.c10
-rw-r--r--drivers/net/phy/qt2025.rs103
-rw-r--r--drivers/net/phy/realtek.c30
-rw-r--r--drivers/net/phy/sfp-bus.c26
-rw-r--r--drivers/net/phy/vitesse.c179
-rw-r--r--drivers/net/ppp/ppp_async.c4
-rw-r--r--drivers/net/ppp/ppp_deflate.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c8
-rw-r--r--drivers/net/ppp/ppp_mppe.c2
-rw-r--r--drivers/net/ppp/ppp_synctty.c2
-rw-r--r--drivers/net/pse-pd/pse_core.c22
-rw-r--r--drivers/net/pse-pd/tps23881.c21
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/slip/slhc.c59
-rw-r--r--drivers/net/sungem_phy.c35
-rw-r--r--drivers/net/tap.c1
-rw-r--r--drivers/net/team/team_core.c8
-rw-r--r--drivers/net/tun.c12
-rw-r--r--drivers/net/usb/cdc_ether.c3
-rw-r--r--drivers/net/usb/ipheth.c20
-rw-r--r--drivers/net/usb/net1080.c2
-rw-r--r--drivers/net/usb/r8152.c17
-rw-r--r--drivers/net/usb/sierra_net.c2
-rw-r--r--drivers/net/usb/usbnet.c49
-rw-r--r--drivers/net/veth.c3
-rw-r--r--drivers/net/virtio_net.c185
-rw-r--r--drivers/net/vmxnet3/vmxnet3_xdp.c2
-rw-r--r--drivers/net/vrf.c9
-rw-r--r--drivers/net/vsockmon.c4
-rw-r--r--drivers/net/vxlan/vxlan_core.c16
-rw-r--r--drivers/net/vxlan/vxlan_private.h2
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c19
-rw-r--r--drivers/net/wireguard/device.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c115
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h13
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h23
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h12
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c13
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c12
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c44
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h8
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c354
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h126
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h12
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c72
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c12
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c60
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c8
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c2
-rw-r--r--drivers/net/wireless/ath/hw.c2
-rw-r--r--drivers/net/wireless/ath/key.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_lpphy.c20
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_lpphy.h2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c45
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c22
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h46
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_wx.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.h6
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/commands.h303
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h87
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h79
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c86
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c107
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c96
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c64
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c32
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.h5
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c2
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/libertas_tf.h3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.h4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c458
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c31
-rw-r--r--drivers/net/wireless/marvell/mwifiex/decl.h23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h57
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c73
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ioctl.h5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c69
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c76
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h49
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c36
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_tx.c9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c202
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c104
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c7
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c66
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/testmode.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c154
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c36
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c36
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c89
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c37
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c62
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.h2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/eeprom.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c20
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c5
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c12
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Kconfig5
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig1
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c38
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c303
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h7
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c53
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h20
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h17
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c18
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h24
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c41
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.h15
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c207
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig16
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile8
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c12
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c196
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h6
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c510
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h12
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c314
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h191
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c187
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c513
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h159
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c51
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h12
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c35
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c74
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h8
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h89
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c46
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c138
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c55
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c292
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c42
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c29
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.h24
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c211
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h20
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c848
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c418
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h23
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bte.c93
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c52
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c264
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c151
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c8
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h67
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c338
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.h23
-rw-r--r--drivers/net/wireless/rsi/rsi_debugfs.h1
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c5
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c2
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c4
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/wwan/qcom_bam_dmux.c11
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.c47
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.h9
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c53
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.h3
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.c1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_trace.c1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.c34
-rw-r--r--drivers/net/xen-netback/hash.c5
1026 files changed, 56351 insertions, 10854 deletions
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 6d15ab3bfbbc..0433a0f36d1b 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -3098,9 +3098,9 @@ static void amt_link_setup(struct net_device *dev)
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->priv_flags |= IFF_NO_QUEUE;
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
+ dev->netns_local = true;
dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->features |= NETIF_F_NETNS_LOCAL;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
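The amt hunk above is part of a tree-wide conversion: the NETIF_F_LLTX and NETIF_F_NETNS_LOCAL feature bits move out of dev->features into dedicated booleans on struct net_device. A minimal sketch of the new-style setup, using a hypothetical example_link_setup() and assuming a kernel that already carries the dev->lltx and dev->netns_local fields:

	static void example_link_setup(struct net_device *dev)
	{
		/* was: dev->features |= NETIF_F_LLTX | NETIF_F_NETNS_LOCAL; */
		dev->lltx = true;        /* xmit path needs no netif_tx_lock */
		dev->netns_local = true; /* device may not change netns */
	}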
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index d5c56ca91b77..e057526448d7 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -68,6 +68,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
__be16 proto;
void *oiph;
int err;
+ int nh;
bareudp = rcu_dereference_sk_user_data(sk);
if (!bareudp)
@@ -83,7 +84,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
sizeof(ipversion))) {
- bareudp->dev->stats.rx_dropped++;
+ DEV_STATS_INC(bareudp->dev, rx_dropped);
goto drop;
}
ipversion >>= 4;
@@ -93,7 +94,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
} else if (ipversion == 6 && bareudp->multi_proto_mode) {
proto = htons(ETH_P_IPV6);
} else {
- bareudp->dev->stats.rx_dropped++;
+ DEV_STATS_INC(bareudp->dev, rx_dropped);
goto drop;
}
} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
@@ -107,7 +108,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
ipv4_is_multicast(tunnel_hdr->daddr)) {
proto = htons(ETH_P_MPLS_MC);
} else {
- bareudp->dev->stats.rx_dropped++;
+ DEV_STATS_INC(bareudp->dev, rx_dropped);
goto drop;
}
} else {
@@ -123,7 +124,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
(addr_type & IPV6_ADDR_MULTICAST)) {
proto = htons(ETH_P_MPLS_MC);
} else {
- bareudp->dev->stats.rx_dropped++;
+ DEV_STATS_INC(bareudp->dev, rx_dropped);
goto drop;
}
}
@@ -135,7 +136,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
proto,
!net_eq(bareudp->net,
dev_net(bareudp->dev)))) {
- bareudp->dev->stats.rx_dropped++;
+ DEV_STATS_INC(bareudp->dev, rx_dropped);
goto drop;
}
@@ -143,15 +144,30 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0);
if (!tun_dst) {
- bareudp->dev->stats.rx_dropped++;
+ DEV_STATS_INC(bareudp->dev, rx_dropped);
goto drop;
}
skb_dst_set(skb, &tun_dst->dst);
skb->dev = bareudp->dev;
- oiph = skb_network_header(skb);
- skb_reset_network_header(skb);
skb_reset_mac_header(skb);
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
+ skb_reset_network_header(skb);
+
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(bareudp->dev, rx_length_errors);
+ DEV_STATS_INC(bareudp->dev, rx_errors);
+ goto drop;
+ }
+
+ /* Get the outer header. */
+ oiph = skb->head + nh;
+
if (!ipv6_mod_enabled() || family == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
else
@@ -169,8 +185,8 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
&((struct ipv6hdr *)oiph)->saddr);
}
if (err > 1) {
- ++bareudp->dev->stats.rx_frame_errors;
- ++bareudp->dev->stats.rx_errors;
+ DEV_STATS_INC(bareudp->dev, rx_frame_errors);
+ DEV_STATS_INC(bareudp->dev, rx_errors);
goto drop;
}
}
@@ -301,6 +317,9 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be32 saddr;
int err;
+ if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
+ return -EINVAL;
+
if (!sock)
return -ESHUTDOWN;
@@ -368,6 +387,9 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
+ if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
+ return -EINVAL;
+
if (!sock)
return -ESHUTDOWN;
@@ -467,11 +489,11 @@ tx_error:
dev_kfree_skb(skb);
if (err == -ELOOP)
- dev->stats.collisions++;
+ DEV_STATS_INC(dev, collisions);
else if (err == -ENETUNREACH)
- dev->stats.tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
return NETDEV_TX_OK;
}
@@ -553,7 +575,6 @@ static void bareudp_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &bareudp_type);
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
- dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->hw_features |= NETIF_F_RXCSUM;
@@ -566,6 +587,7 @@ static void bareudp_setup(struct net_device *dev)
dev->type = ARPHRD_NONE;
netif_keep_dst(dev);
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->lltx = true;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}
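The bareudp receive fix above guards against skb->head reallocation: pskb_inet_may_pull() can end up in pskb_may_pull(), which may reallocate the header, so a pointer obtained earlier from skb_network_header() could dangle. Offsets survive reallocation; pointers do not. A condensed sketch of the pattern, using a hypothetical example_outer_header() helper:

	static void *example_outer_header(struct sk_buff *skb)
	{
		/* Save the outer header as an offset relative to skb->head. */
		int nh = skb_network_header(skb) - skb->head;

		skb_reset_network_header(skb);

		/* May reallocate skb->head while pulling the inner header. */
		if (!pskb_inet_may_pull(skb))
			return NULL;

		/* Recompute the pointer from the saved offset. */
		return skb->head + nh;
	}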
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 1cd92c12e782..b1bffd8e9a95 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -419,6 +419,41 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
#ifdef CONFIG_XFRM_OFFLOAD
/**
+ * bond_ipsec_dev - Get active device for IPsec offload
+ * @xs: pointer to transformer state struct
+ *
+ * Context: caller must hold rcu_read_lock.
+ *
+ * Return: the device for ipsec offload, or NULL if none exists.
+ **/
+static struct net_device *bond_ipsec_dev(struct xfrm_state *xs)
+{
+ struct net_device *bond_dev = xs->xso.dev;
+ struct bonding *bond;
+ struct slave *slave;
+
+ if (!bond_dev)
+ return NULL;
+
+ bond = netdev_priv(bond_dev);
+ if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
+ return NULL;
+
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (!slave)
+ return NULL;
+
+ if (!xs->xso.real_dev)
+ return NULL;
+
+ if (xs->xso.real_dev != slave->dev)
+ pr_warn_ratelimited("%s: (slave %s): not same with IPsec offload real dev %s\n",
+ bond_dev->name, slave->dev->name, xs->xso.real_dev->name);
+
+ return slave->dev;
+}
+
+/**
* bond_ipsec_add_sa - program device with a security association
* @xs: pointer to transformer state struct
* @extack: extack point to fill failure reason
@@ -427,6 +462,8 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
struct net_device *bond_dev = xs->xso.dev;
+ struct net_device *real_dev;
+ netdevice_tracker tracker;
struct bond_ipsec *ipsec;
struct bonding *bond;
struct slave *slave;
@@ -438,74 +475,80 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs,
rcu_read_lock();
bond = netdev_priv(bond_dev);
slave = rcu_dereference(bond->curr_active_slave);
- if (!slave) {
- rcu_read_unlock();
- return -ENODEV;
+ real_dev = slave ? slave->dev : NULL;
+ netdev_hold(real_dev, &tracker, GFP_ATOMIC);
+ rcu_read_unlock();
+ if (!real_dev) {
+ err = -ENODEV;
+ goto out;
}
- if (!slave->dev->xfrmdev_ops ||
- !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
- netif_is_bond_master(slave->dev)) {
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_add ||
+ netif_is_bond_master(real_dev)) {
NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload");
- rcu_read_unlock();
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
- ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
+ ipsec = kmalloc(sizeof(*ipsec), GFP_KERNEL);
if (!ipsec) {
- rcu_read_unlock();
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out;
}
- xs->xso.real_dev = slave->dev;
- err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
+ xs->xso.real_dev = real_dev;
+ err = real_dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
if (!err) {
ipsec->xs = xs;
INIT_LIST_HEAD(&ipsec->list);
- spin_lock_bh(&bond->ipsec_lock);
+ mutex_lock(&bond->ipsec_lock);
list_add(&ipsec->list, &bond->ipsec_list);
- spin_unlock_bh(&bond->ipsec_lock);
+ mutex_unlock(&bond->ipsec_lock);
} else {
kfree(ipsec);
}
- rcu_read_unlock();
+out:
+ netdev_put(real_dev, &tracker);
return err;
}
static void bond_ipsec_add_sa_all(struct bonding *bond)
{
struct net_device *bond_dev = bond->dev;
+ struct net_device *real_dev;
struct bond_ipsec *ipsec;
struct slave *slave;
- rcu_read_lock();
- slave = rcu_dereference(bond->curr_active_slave);
- if (!slave)
- goto out;
+ slave = rtnl_dereference(bond->curr_active_slave);
+ real_dev = slave ? slave->dev : NULL;
+ if (!real_dev)
+ return;
- if (!slave->dev->xfrmdev_ops ||
- !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
- netif_is_bond_master(slave->dev)) {
- spin_lock_bh(&bond->ipsec_lock);
+ mutex_lock(&bond->ipsec_lock);
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_add ||
+ netif_is_bond_master(real_dev)) {
if (!list_empty(&bond->ipsec_list))
- slave_warn(bond_dev, slave->dev,
+ slave_warn(bond_dev, real_dev,
"%s: no slave xdo_dev_state_add\n",
__func__);
- spin_unlock_bh(&bond->ipsec_lock);
goto out;
}
- spin_lock_bh(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
- ipsec->xs->xso.real_dev = slave->dev;
- if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
- slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
+ /* Skip if the state was already added before ipsec_lock was acquired */
+ if (ipsec->xs->xso.real_dev == real_dev)
+ continue;
+
+ ipsec->xs->xso.real_dev = real_dev;
+ if (real_dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
+ slave_warn(bond_dev, real_dev, "%s: failed to add SA\n", __func__);
ipsec->xs->xso.real_dev = NULL;
}
}
- spin_unlock_bh(&bond->ipsec_lock);
out:
- rcu_read_unlock();
+ mutex_unlock(&bond->ipsec_lock);
}
/**
@@ -515,6 +558,8 @@ out:
static void bond_ipsec_del_sa(struct xfrm_state *xs)
{
struct net_device *bond_dev = xs->xso.dev;
+ struct net_device *real_dev;
+ netdevice_tracker tracker;
struct bond_ipsec *ipsec;
struct bonding *bond;
struct slave *slave;
@@ -525,6 +570,9 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs)
rcu_read_lock();
bond = netdev_priv(bond_dev);
slave = rcu_dereference(bond->curr_active_slave);
+ real_dev = slave ? slave->dev : NULL;
+ netdev_hold(real_dev, &tracker, GFP_ATOMIC);
+ rcu_read_unlock();
if (!slave)
goto out;
@@ -532,18 +580,19 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs)
if (!xs->xso.real_dev)
goto out;
- WARN_ON(xs->xso.real_dev != slave->dev);
+ WARN_ON(xs->xso.real_dev != real_dev);
- if (!slave->dev->xfrmdev_ops ||
- !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
- netif_is_bond_master(slave->dev)) {
- slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_delete ||
+ netif_is_bond_master(real_dev)) {
+ slave_warn(bond_dev, real_dev, "%s: no slave xdo_dev_state_delete\n", __func__);
goto out;
}
- slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(xs);
out:
- spin_lock_bh(&bond->ipsec_lock);
+ netdev_put(real_dev, &tracker);
+ mutex_lock(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
if (ipsec->xs == xs) {
list_del(&ipsec->list);
@@ -551,41 +600,72 @@ out:
break;
}
}
- spin_unlock_bh(&bond->ipsec_lock);
- rcu_read_unlock();
+ mutex_unlock(&bond->ipsec_lock);
}
static void bond_ipsec_del_sa_all(struct bonding *bond)
{
struct net_device *bond_dev = bond->dev;
+ struct net_device *real_dev;
struct bond_ipsec *ipsec;
struct slave *slave;
- rcu_read_lock();
- slave = rcu_dereference(bond->curr_active_slave);
- if (!slave) {
- rcu_read_unlock();
+ slave = rtnl_dereference(bond->curr_active_slave);
+ real_dev = slave ? slave->dev : NULL;
+ if (!real_dev)
return;
- }
- spin_lock_bh(&bond->ipsec_lock);
+ mutex_lock(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
if (!ipsec->xs->xso.real_dev)
continue;
- if (!slave->dev->xfrmdev_ops ||
- !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
- netif_is_bond_master(slave->dev)) {
- slave_warn(bond_dev, slave->dev,
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_delete ||
+ netif_is_bond_master(real_dev)) {
+ slave_warn(bond_dev, real_dev,
"%s: no slave xdo_dev_state_delete\n",
__func__);
} else {
- slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
+ if (real_dev->xfrmdev_ops->xdo_dev_state_free)
+ real_dev->xfrmdev_ops->xdo_dev_state_free(ipsec->xs);
}
- ipsec->xs->xso.real_dev = NULL;
}
- spin_unlock_bh(&bond->ipsec_lock);
+ mutex_unlock(&bond->ipsec_lock);
+}
+
+static void bond_ipsec_free_sa(struct xfrm_state *xs)
+{
+ struct net_device *bond_dev = xs->xso.dev;
+ struct net_device *real_dev;
+ netdevice_tracker tracker;
+ struct bonding *bond;
+ struct slave *slave;
+
+ if (!bond_dev)
+ return;
+
+ rcu_read_lock();
+ bond = netdev_priv(bond_dev);
+ slave = rcu_dereference(bond->curr_active_slave);
+ real_dev = slave ? slave->dev : NULL;
+ netdev_hold(real_dev, &tracker, GFP_ATOMIC);
rcu_read_unlock();
+
+ if (!slave)
+ goto out;
+
+ if (!xs->xso.real_dev)
+ goto out;
+
+ WARN_ON(xs->xso.real_dev != real_dev);
+
+ if (real_dev && real_dev->xfrmdev_ops &&
+ real_dev->xfrmdev_ops->xdo_dev_state_free)
+ real_dev->xfrmdev_ops->xdo_dev_state_free(xs);
+out:
+ netdev_put(real_dev, &tracker);
}
/**
@@ -595,44 +675,80 @@ static void bond_ipsec_del_sa_all(struct bonding *bond)
**/
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
- struct net_device *bond_dev = xs->xso.dev;
struct net_device *real_dev;
- struct slave *curr_active;
- struct bonding *bond;
- int err;
+ bool ok = false;
- bond = netdev_priv(bond_dev);
rcu_read_lock();
- curr_active = rcu_dereference(bond->curr_active_slave);
- real_dev = curr_active->dev;
+ real_dev = bond_ipsec_dev(xs);
+ if (!real_dev)
+ goto out;
- if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
- err = false;
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
+ netif_is_bond_master(real_dev))
goto out;
- }
- if (!xs->xso.real_dev) {
- err = false;
+ ok = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
+out:
+ rcu_read_unlock();
+ return ok;
+}
+
+/**
+ * bond_advance_esn_state - ESN support for IPSec HW offload
+ * @xs: pointer to transformer state struct
+ **/
+static void bond_advance_esn_state(struct xfrm_state *xs)
+{
+ struct net_device *real_dev;
+
+ rcu_read_lock();
+ real_dev = bond_ipsec_dev(xs);
+ if (!real_dev)
+ goto out;
+
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
+ pr_warn_ratelimited("%s: %s doesn't support xdo_dev_state_advance_esn\n", __func__, real_dev->name);
goto out;
}
+ real_dev->xfrmdev_ops->xdo_dev_state_advance_esn(xs);
+out:
+ rcu_read_unlock();
+}
+
+/**
+ * bond_xfrm_update_stats - Update xfrm state stats
+ * @xs: pointer to transformer state struct
+ **/
+static void bond_xfrm_update_stats(struct xfrm_state *xs)
+{
+ struct net_device *real_dev;
+
+ rcu_read_lock();
+ real_dev = bond_ipsec_dev(xs);
+ if (!real_dev)
+ goto out;
+
if (!real_dev->xfrmdev_ops ||
- !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
- netif_is_bond_master(real_dev)) {
- err = false;
+ !real_dev->xfrmdev_ops->xdo_dev_state_update_stats) {
+ pr_warn_ratelimited("%s: %s doesn't support xdo_dev_state_update_stats\n", __func__, real_dev->name);
goto out;
}
- err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
+ real_dev->xfrmdev_ops->xdo_dev_state_update_stats(xs);
out:
rcu_read_unlock();
- return err;
}
static const struct xfrmdev_ops bond_xfrmdev_ops = {
.xdo_dev_state_add = bond_ipsec_add_sa,
.xdo_dev_state_delete = bond_ipsec_del_sa,
+ .xdo_dev_state_free = bond_ipsec_free_sa,
.xdo_dev_offload_ok = bond_ipsec_offload_ok,
+ .xdo_dev_state_advance_esn = bond_advance_esn_state,
+ .xdo_dev_state_update_stats = bond_xfrm_update_stats,
};
#endif /* CONFIG_XFRM_OFFLOAD */
@@ -2258,7 +2374,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
goto err_sysfs_del;
}
- res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ res = dev_xdp_propagate(slave_dev, &xdp);
if (res < 0) {
/* ndo_bpf() sets extack error message */
slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res);
@@ -2394,7 +2510,7 @@ static int __bond_release_one(struct net_device *bond_dev,
.prog = NULL,
.extack = NULL,
};
- if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp))
+ if (dev_xdp_propagate(slave_dev, &xdp))
slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n");
}
@@ -5494,9 +5610,9 @@ bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
break;
default:
- /* Should never happen. Mode guarded by bond_xdp_check() */
- netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
- WARN_ON_ONCE(1);
+ if (net_ratelimit())
+ netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n",
+ BOND_MODE(bond));
return NULL;
}
@@ -5584,7 +5700,7 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
goto err;
}
- err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ err = dev_xdp_propagate(slave_dev, &xdp);
if (err < 0) {
/* ndo_bpf() sets extack error message */
slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err);
@@ -5616,7 +5732,7 @@ err:
if (slave == rollback_slave)
break;
- err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ err_unwind = dev_xdp_propagate(slave_dev, &xdp);
if (err_unwind < 0)
slave_err(dev, slave_dev,
"Error %d when unwinding XDP program change\n", err_unwind);
@@ -5770,9 +5886,6 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
if (real_dev) {
ret = ethtool_get_ts_info_by_layer(real_dev, info);
} else {
- info->phc_index = -1;
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
/* Check if all slaves support software tx timestamping */
rcu_read_lock();
bond_for_each_slave_rcu(bond, slave, iter) {
@@ -5882,11 +5995,14 @@ void bond_setup(struct net_device *bond_dev)
/* set up xfrm device ops (only supported in active-backup right now) */
bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
INIT_LIST_HEAD(&bond->ipsec_list);
- spin_lock_init(&bond->ipsec_lock);
+ mutex_init(&bond->ipsec_lock);
#endif /* CONFIG_XFRM_OFFLOAD */
/* don't acquire bond device's netif_tx_lock when transmitting */
- bond_dev->features |= NETIF_F_LLTX;
+ bond_dev->lltx = true;
+
+ /* Don't allow bond devices to change network namespaces. */
+ bond_dev->netns_local = true;
/* By default, we declare the bond to be fully
* VLAN hardware accelerated capable. Special
@@ -5895,9 +6011,6 @@ void bond_setup(struct net_device *bond_dev)
* capable
*/
- /* Don't allow bond devices to change network namespaces. */
- bond_dev->features |= NETIF_F_NETNS_LOCAL;
-
bond_dev->hw_features = BOND_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER |
@@ -5931,6 +6044,10 @@ static void bond_uninit(struct net_device *bond_dev)
__bond_release_one(bond_dev, slave->dev, true, true);
netdev_info(bond_dev, "Released all slaves\n");
+#ifdef CONFIG_XFRM_OFFLOAD
+ mutex_destroy(&bond->ipsec_lock);
+#endif /* CONFIG_XFRM_OFFLOAD */
+
bond_set_slave_arr(bond, NULL, NULL);
list_del_rcu(&bond->bond_list);
@@ -6338,7 +6455,8 @@ static int bond_init(struct net_device *bond_dev)
netdev_dbg(bond_dev, "Begin bond_init\n");
- bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
+ bond->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ bond_dev->name);
if (!bond->wq)
return -ENOMEM;
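A pattern recurs throughout the bonding IPsec changes above: look up the current active slave under rcu_read_lock(), take a tracked reference with netdev_hold() (which tolerates a NULL device, as the hunks themselves rely on), and leave the RCU section before doing anything that may sleep, such as a GFP_KERNEL allocation or taking bond->ipsec_lock, now a mutex. A sketch with a hypothetical example_get_active():

	static struct net_device *example_get_active(struct bonding *bond,
						     netdevice_tracker *tracker)
	{
		struct net_device *real_dev;
		struct slave *slave;

		rcu_read_lock();
		slave = rcu_dereference(bond->curr_active_slave);
		real_dev = slave ? slave->dev : NULL;
		netdev_hold(real_dev, tracker, GFP_ATOMIC);	/* NULL-safe */
		rcu_read_unlock();

		/* Caller may now sleep; release with netdev_put(real_dev, tracker). */
		return real_dev;
	}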
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index bc80fb6397dc..95d59a18c022 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -936,7 +936,7 @@ static int bond_option_active_slave_set(struct bonding *bond,
/* check to see if we are clearing active */
if (!slave_dev) {
netdev_dbg(bond->dev, "Clearing current active slave\n");
- RCU_INIT_POINTER(bond->curr_active_slave, NULL);
+ bond_change_active_slave(bond, NULL);
bond_select_active_slave(bond);
} else {
struct slave *old_active = rtnl_dereference(bond->curr_active_slave);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 7f9b60a42d29..cf989bea9aa3 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -225,6 +225,7 @@ source "drivers/net/can/m_can/Kconfig"
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/peak_canfd/Kconfig"
source "drivers/net/can/rcar/Kconfig"
+source "drivers/net/can/rockchip/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
source "drivers/net/can/softing/Kconfig"
source "drivers/net/can/spi/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 4669cd51e7bf..a71db2cfe990 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_CAN_SLCAN) += slcan/
obj-y += dev/
obj-y += esd/
obj-y += rcar/
+obj-y += rockchip/
obj-y += spi/
obj-y += usb/
obj-y += softing/
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 11f434d708b3..191707d7e3da 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1191,7 +1191,7 @@ MODULE_DEVICE_TABLE(platform, at91_can_id_table);
static struct platform_driver at91_can_driver = {
.probe = at91_can_probe,
- .remove_new = at91_can_remove,
+ .remove = at91_can_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = of_match_ptr(at91_can_dt_ids),
diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c
index 49cf9682b925..bfc60eb33dc3 100644
--- a/drivers/net/can/bxcan.c
+++ b/drivers/net/can/bxcan.c
@@ -1092,7 +1092,7 @@ static struct platform_driver bxcan_driver = {
.of_match_table = bxcan_of_match,
},
.probe = bxcan_probe,
- .remove_new = bxcan_remove,
+ .remove = bxcan_remove,
};
module_platform_driver(bxcan_driver);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index e2ec69aa46e5..6cba9717a6d8 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -476,7 +476,7 @@ static struct platform_driver c_can_plat_driver = {
.of_match_table = c_can_of_table,
},
.probe = c_can_plat_probe,
- .remove_new = c_can_plat_remove,
+ .remove = c_can_plat_remove,
.suspend = c_can_suspend,
.resume = c_can_resume,
.id_table = c_can_id_table,
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
index 22009440a983..d06762817153 100644
--- a/drivers/net/can/cc770/cc770_isa.c
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -307,7 +307,7 @@ static void cc770_isa_remove(struct platform_device *pdev)
static struct platform_driver cc770_isa_driver = {
.probe = cc770_isa_probe,
- .remove_new = cc770_isa_remove,
+ .remove = cc770_isa_remove,
.driver = {
.name = KBUILD_MODNAME,
},
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 13bcfba05f18..b6c4f02ffb97 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -70,17 +70,10 @@ static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg,
static int cc770_get_of_node_data(struct platform_device *pdev,
struct cc770_priv *priv)
{
+ u32 clkext = CC770_PLATFORM_CAN_CLOCK, clkout = 0;
struct device_node *np = pdev->dev.of_node;
- const u32 *prop;
- int prop_size;
- u32 clkext;
-
- prop = of_get_property(np, "bosch,external-clock-frequency",
- &prop_size);
- if (prop && (prop_size == sizeof(u32)))
- clkext = *prop;
- else
- clkext = CC770_PLATFORM_CAN_CLOCK; /* default */
+
+ of_property_read_u32(np, "bosch,external-clock-frequency", &clkext);
priv->can.clock.freq = clkext;
/* The system clock may not exceed 10 MHz */
@@ -98,7 +91,7 @@ static int cc770_get_of_node_data(struct platform_device *pdev,
if (of_property_read_bool(np, "bosch,iso-low-speed-mux"))
priv->cpu_interface |= CPUIF_MUX;
- if (!of_get_property(np, "bosch,no-comperator-bypass", NULL))
+ if (!of_property_read_bool(np, "bosch,no-comperator-bypass"))
priv->bus_config |= BUSCFG_CBY;
if (of_property_read_bool(np, "bosch,disconnect-rx0-input"))
priv->bus_config |= BUSCFG_DR0;
@@ -109,25 +102,22 @@ static int cc770_get_of_node_data(struct platform_device *pdev,
if (of_property_read_bool(np, "bosch,polarity-dominant"))
priv->bus_config |= BUSCFG_POL;
- prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size);
- if (prop && (prop_size == sizeof(u32)) && *prop > 0) {
- u32 cdv = clkext / *prop;
- int slew;
+ of_property_read_u32(np, "bosch,clock-out-frequency", &clkout);
+ if (clkout > 0) {
+ u32 cdv = clkext / clkout;
if (cdv > 0 && cdv < 16) {
+ u32 slew;
+
priv->cpu_interface |= CPUIF_CEN;
priv->clkout |= (cdv - 1) & CLKOUT_CD_MASK;
- prop = of_get_property(np, "bosch,slew-rate",
- &prop_size);
- if (prop && (prop_size == sizeof(u32))) {
- slew = *prop;
- } else {
+ if (of_property_read_u32(np, "bosch,slew-rate", &slew)) {
/* Determine default slew rate */
slew = (CLKOUT_SL_MASK >>
CLKOUT_SL_SHIFT) -
((cdv * clkext - 1) / 8000000);
- if (slew < 0)
+ if (slew > (CLKOUT_SL_MASK >> CLKOUT_SL_SHIFT))
slew = 0;
}
priv->clkout |= (slew << CLKOUT_SL_SHIFT) &
@@ -257,7 +247,7 @@ static struct platform_driver cc770_platform_driver = {
.of_match_table = cc770_platform_table,
},
.probe = cc770_platform_probe,
- .remove_new = cc770_platform_remove,
+ .remove = cc770_platform_remove,
};
module_platform_driver(cc770_platform_driver);
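The cc770 cleanup above relies on a documented property of of_property_read_u32(): on failure it leaves the output variable untouched. Presetting the variable to the default therefore replaces the old open-coded of_get_property() length checks. Distilled from the hunk:

	u32 clkext = CC770_PLATFORM_CAN_CLOCK;	/* default if property absent */

	/* Overwrites clkext only if the property exists and is well-formed. */
	of_property_read_u32(np, "bosch,external-clock-frequency", &clkext);
	priv->can.clock.freq = clkext;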
diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c
index 55bb10b157b4..70e2577c8541 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_platform.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c
@@ -111,7 +111,7 @@ MODULE_DEVICE_TABLE(of, ctucan_of_match);
static struct platform_driver ctucanfd_driver = {
.probe = ctucan_platform_probe,
- .remove_new = ctucan_platform_remove,
+ .remove = ctucan_platform_remove,
.driver = {
.name = DRV_NAME,
.pm = &ctucan_platform_pm_ops,
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 87828f953073..6792c14fd7eb 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -380,12 +380,9 @@ int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_ON);
info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index dfdc039d92a6..01aacdcda260 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -65,15 +65,6 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
if (!data)
return 0;
- if (data[IFLA_CAN_BITTIMING]) {
- struct can_bittiming bt;
-
- memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
- err = can_validate_bittiming(&bt, extack);
- if (err)
- return err;
- }
-
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK;
@@ -114,6 +105,15 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
}
}
+ if (data[IFLA_CAN_BITTIMING]) {
+ struct can_bittiming bt;
+
+ memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
+ err = can_validate_bittiming(&bt, extack);
+ if (err)
+ return err;
+ }
+
if (is_can_fd) {
if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
return -EOPNOTSUPP;
@@ -195,48 +195,6 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
/* We need synchronization with dev->stop() */
ASSERT_RTNL();
- if (data[IFLA_CAN_BITTIMING]) {
- struct can_bittiming bt;
-
- /* Do not allow changing bittiming while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Calculate bittiming parameters based on
- * bittiming_const if set, otherwise pass bitrate
- * directly via do_set_bitrate(). Bail out if neither
- * is given.
- */
- if (!priv->bittiming_const && !priv->do_set_bittiming &&
- !priv->bitrate_const)
- return -EOPNOTSUPP;
-
- memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
- err = can_get_bittiming(dev, &bt,
- priv->bittiming_const,
- priv->bitrate_const,
- priv->bitrate_const_cnt,
- extack);
- if (err)
- return err;
-
- if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
- NL_SET_ERR_MSG_FMT(extack,
- "arbitration bitrate %u bps surpasses transceiver capabilities of %u bps",
- bt.bitrate, priv->bitrate_max);
- return -EINVAL;
- }
-
- memcpy(&priv->bittiming, &bt, sizeof(bt));
-
- if (priv->do_set_bittiming) {
- /* Finally, set the bit-timing registers */
- err = priv->do_set_bittiming(dev);
- if (err)
- return err;
- }
- }
-
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm;
u32 ctrlstatic;
@@ -284,6 +242,48 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_TDC_MASK;
}
+ if (data[IFLA_CAN_BITTIMING]) {
+ struct can_bittiming bt;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Calculate bittiming parameters based on
+ * bittiming_const if set, otherwise pass bitrate
+ * directly via do_set_bitrate(). Bail out if neither
+ * is given.
+ */
+ if (!priv->bittiming_const && !priv->do_set_bittiming &&
+ !priv->bitrate_const)
+ return -EOPNOTSUPP;
+
+ memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
+ err = can_get_bittiming(dev, &bt,
+ priv->bittiming_const,
+ priv->bitrate_const,
+ priv->bitrate_const_cnt,
+ extack);
+ if (err)
+ return err;
+
+ if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "arbitration bitrate %u bps surpasses transceiver capabilities of %u bps",
+ bt.bitrate, priv->bitrate_max);
+ return -EINVAL;
+ }
+
+ memcpy(&priv->bittiming, &bt, sizeof(bt));
+
+ if (priv->do_set_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = priv->do_set_bittiming(dev);
+ if (err)
+ return err;
+ }
+ }
+
if (data[IFLA_CAN_RESTART_MS]) {
/* Do not allow changing restart delay while running */
if (dev->flags & IFF_UP)
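Both can_validate() and can_changelink() above move the IFLA_CAN_BITTIMING handling behind the IFLA_CAN_CTRLMODE handling, so that the bit-timing checks see the updated mode (CAN_CTRLMODE_FD in particular changes what is valid). A sketch of the resulting flow, with hypothetical apply_ctrlmode() and apply_bittiming() helpers standing in for the real blocks:

	if (data[IFLA_CAN_CTRLMODE])
		apply_ctrlmode(dev, data);	/* may set or clear CAN_CTRLMODE_FD */

	if (data[IFLA_CAN_BITTIMING])
		apply_bittiming(dev, data);	/* checked against the updated mode */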
diff --git a/drivers/net/can/esd/esd_402_pci-core.c b/drivers/net/can/esd/esd_402_pci-core.c
index b7cdcffd0e45..5d6d2828cd04 100644
--- a/drivers/net/can/esd/esd_402_pci-core.c
+++ b/drivers/net/can/esd/esd_402_pci-core.c
@@ -369,12 +369,13 @@ static int pci402_init_cores(struct pci_dev *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
priv = netdev_priv(netdev);
+ priv->can.clock.freq = card->ov.core_frequency;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_CC_LEN8_DLC;
-
- priv->can.clock.freq = card->ov.core_frequency;
+ if (card->ov.features & ACC_OV_REG_FEAT_MASK_DAR)
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD)
priv->can.bittiming_const = &pci402_bittiming_const_canfd;
else
diff --git a/drivers/net/can/esd/esdacc.c b/drivers/net/can/esd/esdacc.c
index 121cbbf81458..c80032bc1a52 100644
--- a/drivers/net/can/esd/esdacc.c
+++ b/drivers/net/can/esd/esdacc.c
@@ -17,6 +17,9 @@
/* esdACC DLC register layout */
#define ACC_DLC_DLC_MASK GENMASK(3, 0)
#define ACC_DLC_RTR_FLAG BIT(4)
+#define ACC_DLC_SSTX_FLAG BIT(24) /* Single Shot TX */
+
+/* esdACC DLC in struct acc_bmmsg_rxtxdone::acc_dlc.len only! */
#define ACC_DLC_TXD_FLAG BIT(5)
/* ecc value of esdACC equals SJA1000's ECC register */
@@ -43,8 +46,8 @@
static void acc_resetmode_enter(struct acc_core *core)
{
- acc_set_bits(core, ACC_CORE_OF_CTRL_MODE,
- ACC_REG_CONTROL_MASK_MODE_RESETMODE);
+ acc_set_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_RESETMODE);
/* Read back reset mode bit to flush PCI write posting */
acc_resetmode_entered(core);
@@ -52,14 +55,14 @@ static void acc_resetmode_enter(struct acc_core *core)
static void acc_resetmode_leave(struct acc_core *core)
{
- acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
- ACC_REG_CONTROL_MASK_MODE_RESETMODE);
+ acc_clear_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_RESETMODE);
/* Read back reset mode bit to flush PCI write posting */
acc_resetmode_entered(core);
}
-static void acc_txq_put(struct acc_core *core, u32 acc_id, u8 acc_dlc,
+static void acc_txq_put(struct acc_core *core, u32 acc_id, u32 acc_dlc,
const void *data)
{
acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_1,
@@ -172,7 +175,7 @@ int acc_open(struct net_device *netdev)
struct acc_net_priv *priv = netdev_priv(netdev);
struct acc_core *core = priv->core;
u32 tx_fifo_status;
- u32 ctrl_mode;
+ u32 ctrl;
int err;
/* Retry to enter RESET mode if out of sync. */
@@ -187,19 +190,19 @@ int acc_open(struct net_device *netdev)
if (err)
return err;
- ctrl_mode = ACC_REG_CONTROL_MASK_IE_RXTX |
- ACC_REG_CONTROL_MASK_IE_TXERROR |
- ACC_REG_CONTROL_MASK_IE_ERRWARN |
- ACC_REG_CONTROL_MASK_IE_OVERRUN |
- ACC_REG_CONTROL_MASK_IE_ERRPASS;
+ ctrl = ACC_REG_CTRL_MASK_IE_RXTX |
+ ACC_REG_CTRL_MASK_IE_TXERROR |
+ ACC_REG_CTRL_MASK_IE_ERRWARN |
+ ACC_REG_CTRL_MASK_IE_OVERRUN |
+ ACC_REG_CTRL_MASK_IE_ERRPASS;
if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
- ctrl_mode |= ACC_REG_CONTROL_MASK_IE_BUSERR;
+ ctrl |= ACC_REG_CTRL_MASK_IE_BUSERR;
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- ctrl_mode |= ACC_REG_CONTROL_MASK_MODE_LOM;
+ ctrl |= ACC_REG_CTRL_MASK_LOM;
- acc_set_bits(core, ACC_CORE_OF_CTRL_MODE, ctrl_mode);
+ acc_set_bits(core, ACC_CORE_OF_CTRL, ctrl);
acc_resetmode_leave(core);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -218,13 +221,13 @@ int acc_close(struct net_device *netdev)
struct acc_net_priv *priv = netdev_priv(netdev);
struct acc_core *core = priv->core;
- acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
- ACC_REG_CONTROL_MASK_IE_RXTX |
- ACC_REG_CONTROL_MASK_IE_TXERROR |
- ACC_REG_CONTROL_MASK_IE_ERRWARN |
- ACC_REG_CONTROL_MASK_IE_OVERRUN |
- ACC_REG_CONTROL_MASK_IE_ERRPASS |
- ACC_REG_CONTROL_MASK_IE_BUSERR);
+ acc_clear_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_IE_RXTX |
+ ACC_REG_CTRL_MASK_IE_TXERROR |
+ ACC_REG_CTRL_MASK_IE_ERRWARN |
+ ACC_REG_CTRL_MASK_IE_OVERRUN |
+ ACC_REG_CTRL_MASK_IE_ERRPASS |
+ ACC_REG_CTRL_MASK_IE_BUSERR);
netif_stop_queue(netdev);
acc_resetmode_enter(core);
@@ -233,9 +236,9 @@ int acc_close(struct net_device *netdev)
/* Mark pending TX requests to be aborted after controller restart. */
acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
- /* ACC_REG_CONTROL_MASK_MODE_LOM is only accessible in RESET mode */
- acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
- ACC_REG_CONTROL_MASK_MODE_LOM);
+ /* ACC_REG_CTRL_MASK_LOM is only accessible in RESET mode */
+ acc_clear_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_LOM);
close_candev(netdev);
return 0;
@@ -249,7 +252,7 @@ netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
u8 tx_fifo_head = core->tx_fifo_head;
int fifo_usage;
u32 acc_id;
- u8 acc_dlc;
+ u32 acc_dlc;
if (can_dropped_invalid_skb(netdev, skb))
return NETDEV_TX_OK;
@@ -274,6 +277,8 @@ netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
acc_dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
if (cf->can_id & CAN_RTR_FLAG)
acc_dlc |= ACC_DLC_RTR_FLAG;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ acc_dlc |= ACC_DLC_SSTX_FLAG;
if (cf->can_id & CAN_EFF_FLAG) {
acc_id = cf->can_id & CAN_EFF_MASK;
diff --git a/drivers/net/can/esd/esdacc.h b/drivers/net/can/esd/esdacc.h
index a70488b25d39..6b7ebd8c91b2 100644
--- a/drivers/net/can/esd/esdacc.h
+++ b/drivers/net/can/esd/esdacc.h
@@ -35,6 +35,7 @@
*/
#define ACC_OV_REG_FEAT_MASK_CANFD BIT(27 - 16)
#define ACC_OV_REG_FEAT_MASK_NEW_PSC BIT(28 - 16)
+#define ACC_OV_REG_FEAT_MASK_DAR BIT(30 - 16)
#define ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE BIT(0)
#define ACC_OV_REG_MODE_MASK_BM_ENABLE BIT(1)
@@ -50,7 +51,7 @@
#define ACC_OV_REG_MODE_MASK_FPGA_RESET BIT(31)
/* esdACC CAN Core Module */
-#define ACC_CORE_OF_CTRL_MODE 0x0000
+#define ACC_CORE_OF_CTRL 0x0000
#define ACC_CORE_OF_STATUS_IRQ 0x0008
#define ACC_CORE_OF_BRP 0x000c
#define ACC_CORE_OF_BTR 0x0010
@@ -66,21 +67,22 @@
#define ACC_CORE_OF_TXFIFO_DATA_0 0x00c8
#define ACC_CORE_OF_TXFIFO_DATA_1 0x00cc
-#define ACC_REG_CONTROL_MASK_MODE_RESETMODE BIT(0)
-#define ACC_REG_CONTROL_MASK_MODE_LOM BIT(1)
-#define ACC_REG_CONTROL_MASK_MODE_STM BIT(2)
-#define ACC_REG_CONTROL_MASK_MODE_TRANSEN BIT(5)
-#define ACC_REG_CONTROL_MASK_MODE_TS BIT(6)
-#define ACC_REG_CONTROL_MASK_MODE_SCHEDULE BIT(7)
-
-#define ACC_REG_CONTROL_MASK_IE_RXTX BIT(8)
-#define ACC_REG_CONTROL_MASK_IE_TXERROR BIT(9)
-#define ACC_REG_CONTROL_MASK_IE_ERRWARN BIT(10)
-#define ACC_REG_CONTROL_MASK_IE_OVERRUN BIT(11)
-#define ACC_REG_CONTROL_MASK_IE_TSI BIT(12)
-#define ACC_REG_CONTROL_MASK_IE_ERRPASS BIT(13)
-#define ACC_REG_CONTROL_MASK_IE_ALI BIT(14)
-#define ACC_REG_CONTROL_MASK_IE_BUSERR BIT(15)
+/* CTRL register layout */
+#define ACC_REG_CTRL_MASK_RESETMODE BIT(0)
+#define ACC_REG_CTRL_MASK_LOM BIT(1)
+#define ACC_REG_CTRL_MASK_STM BIT(2)
+#define ACC_REG_CTRL_MASK_TRANSEN BIT(5)
+#define ACC_REG_CTRL_MASK_TS BIT(6)
+#define ACC_REG_CTRL_MASK_SCHEDULE BIT(7)
+
+#define ACC_REG_CTRL_MASK_IE_RXTX BIT(8)
+#define ACC_REG_CTRL_MASK_IE_TXERROR BIT(9)
+#define ACC_REG_CTRL_MASK_IE_ERRWARN BIT(10)
+#define ACC_REG_CTRL_MASK_IE_OVERRUN BIT(11)
+#define ACC_REG_CTRL_MASK_IE_TSI BIT(12)
+#define ACC_REG_CTRL_MASK_IE_ERRPASS BIT(13)
+#define ACC_REG_CTRL_MASK_IE_ALI BIT(14)
+#define ACC_REG_CTRL_MASK_IE_BUSERR BIT(15)
/* BRP and BTR register layout for CAN-Classic version */
#define ACC_REG_BRP_CL_MASK_BRP GENMASK(8, 0)
@@ -300,9 +302,9 @@ static inline void acc_clear_bits(struct acc_core *core,
static inline int acc_resetmode_entered(struct acc_core *core)
{
- u32 ctrl = acc_read32(core, ACC_CORE_OF_CTRL_MODE);
+ u32 ctrl = acc_read32(core, ACC_CORE_OF_CTRL);
- return (ctrl & ACC_REG_CONTROL_MASK_MODE_RESETMODE) != 0;
+ return (ctrl & ACC_REG_CTRL_MASK_RESETMODE) != 0;
}
static inline u32 acc_ov_read32(struct acc_ov *ov, unsigned short offs)
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index 8ea7f2795551..ac1a860986df 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -354,6 +354,14 @@ static struct flexcan_devtype_data fsl_imx93_devtype_data = {
FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
+static const struct flexcan_devtype_data fsl_imx95_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_FD |
+ FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI,
+};
+
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
@@ -544,6 +552,13 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
} else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) {
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+ } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI) {
+ /* For SCMI mode the driver does nothing here: after Linux suspends,
+ * ATF sends a request to the SM (system manager, M33 core) via the
+ * SCMI protocol. Once the SM receives the request, it asserts the
+ * IPG_STOP signal to FlexCAN, putting the controller into STOP mode.
+ */
+ return 0;
}
return flexcan_low_power_enter_ack(priv);
@@ -555,7 +570,11 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
u32 reg_mcr;
int ret;
- /* remove stop request */
+ /* Remove the stop request. For FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI
+ * nothing is done here, because ATF already sent the request to the
+ * SM before Linux resumed. Once the SM receives the request, it
+ * deasserts the IPG_STOP signal to FlexCAN.
+ */
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
ret = flexcan_stop_mode_enable_scfw(priv, false);
if (ret < 0)
@@ -1983,6 +2002,9 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
ret = flexcan_setup_stop_mode_scfw(pdev);
else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
ret = flexcan_setup_stop_mode_gpr(pdev);
+ else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)
+ /* ATF will handle all STOP_IPG related work */
+ ret = 0;
else
/* return 0 directly if doesn't support stop mode feature */
return 0;
@@ -2009,6 +2031,7 @@ static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,imx8qm-flexcan", .data = &fsl_imx8qm_devtype_data, },
{ .compatible = "fsl,imx8mp-flexcan", .data = &fsl_imx8mp_devtype_data, },
{ .compatible = "fsl,imx93-flexcan", .data = &fsl_imx93_devtype_data, },
+ { .compatible = "fsl,imx95-flexcan", .data = &fsl_imx95_devtype_data, },
{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
{ .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, },
@@ -2309,9 +2332,19 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device)
if (device_may_wakeup(device))
flexcan_enable_wakeup_irq(priv, true);
- err = pm_runtime_force_suspend(device);
- if (err)
- return err;
+ /* For FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI, ATF must send a request
+ * to the SM through the SCMI protocol, and the SM then asserts the
+ * IPG_STOP signal. All of this requires the CAN clocks to stay on.
+ * Once the CAN module sees IPG_STOP and switches to STOP mode,
+ * whether the CAN clocks stay on or are gated off depends on the
+ * hardware design.
+ */
+ if (!(device_may_wakeup(device) &&
+ priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)) {
+ err = pm_runtime_force_suspend(device);
+ if (err)
+ return err;
+ }
}
return 0;
@@ -2325,9 +2358,12 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
if (netif_running(dev)) {
int err;
- err = pm_runtime_force_resume(device);
- if (err)
- return err;
+ if (!(device_may_wakeup(device) &&
+ priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)) {
+ err = pm_runtime_force_resume(device);
+ if (err)
+ return err;
+ }
if (device_may_wakeup(device))
flexcan_enable_wakeup_irq(priv, false);
@@ -2349,7 +2385,7 @@ static struct platform_driver flexcan_driver = {
.of_match_table = flexcan_of_match,
},
.probe = flexcan_probe,
- .remove_new = flexcan_remove,
+ .remove = flexcan_remove,
.id_table = flexcan_id_table,
};
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
index 025c3417031f..4933d8c7439e 100644
--- a/drivers/net/can/flexcan/flexcan.h
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -68,6 +68,8 @@
#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15)
/* Device supports RX via FIFO */
#define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
+/* Setup stop mode with ATF SCMI protocol to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI BIT(17)
struct flexcan_devtype_data {
u32 quirks; /* quirks needed for different IP cores */
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 6d3ba71a6a73..cdf0ec9fa7f3 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1725,7 +1725,7 @@ static struct platform_driver grcan_driver = {
.of_match_table = grcan_match,
},
.probe = grcan_probe,
- .remove_new = grcan_remove,
+ .remove = grcan_remove,
};
module_platform_driver(grcan_driver);
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 72307297d75e..d32b10900d2f 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -1033,7 +1033,7 @@ static struct platform_driver ifi_canfd_plat_driver = {
.of_match_table = ifi_canfd_of_table,
},
.probe = ifi_canfd_plat_probe,
- .remove_new = ifi_canfd_plat_remove,
+ .remove = ifi_canfd_plat_remove,
};
module_platform_driver(ifi_canfd_plat_driver);
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index d048ea565b89..60c7b83b4539 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -2049,7 +2049,7 @@ static struct platform_driver ican3_driver = {
.name = DRV_NAME,
},
.probe = ican3_probe,
- .remove_new = ican3_remove,
+ .remove = ican3_remove,
};
module_platform_driver(ican3_driver);
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index a60d9efd5f8d..fee012b57f33 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -1053,13 +1053,13 @@ static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
void __iomem *serdes_base;
u32 word1, word2;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- word1 = addr | KVASER_PCIEFD_ALTERA_DMA_64BIT;
- word2 = addr >> 32;
-#else
- word1 = addr;
- word2 = 0;
-#endif
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) {
+ word1 = lower_32_bits(addr) | KVASER_PCIEFD_ALTERA_DMA_64BIT;
+ word2 = upper_32_bits(addr);
+ } else {
+ word1 = addr;
+ word2 = 0;
+ }
serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
iowrite32(word1, serdes_base);
iowrite32(word2, serdes_base + 0x4);
@@ -1072,9 +1072,9 @@ static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK;
u32 msb = 0x0;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- msb = addr >> 32;
-#endif
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ msb = upper_32_bits(addr);
+
serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index;
iowrite32(lsb, serdes_base);
iowrite32(msb, serdes_base + 0x4);
@@ -1087,9 +1087,9 @@ static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK;
u32 msb = 0x0;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- msb = addr >> 32;
-#endif
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ msb = upper_32_bits(addr);
+
serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
iowrite32(msb, serdes_base);
iowrite32(lsb, serdes_base + 0x4);
@@ -1104,6 +1104,9 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
/* Disable the DMA */
iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
+
+ dma_set_mask_and_coherent(&pcie->pci->dev, DMA_BIT_MASK(64));
+
for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev,
KVASER_PCIEFD_DMA_SIZE,
@@ -1686,6 +1689,7 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
u32 srb_irq = 0;
+ u32 srb_release = 0;
int i;
if (!(pci_irq & irq_mask->all))
@@ -1699,17 +1703,14 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
kvaser_pciefd_transmit_irq(pcie->can[i]);
}
- if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
- /* Reset DMA buffer 0, may trigger new interrupt */
- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
- }
+ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
+ srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;
- if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
- /* Reset DMA buffer 1, may trigger new interrupt */
- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
- }
+ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
+ srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;
+
+ if (srb_release)
+ iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
return IRQ_HANDLED;
}
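The kvaser_pciefd hunks trade #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT blocks for IS_ENABLED(), so both branches are always compiled, and use lower_32_bits()/upper_32_bits(), which are well defined even when dma_addr_t is 32 bits wide (an open-coded addr >> 32 is not). A reduced sketch with a hypothetical example_split_dma_addr():

	static void example_split_dma_addr(dma_addr_t addr, u32 *lsb, u32 *msb)
	{
		*lsb = lower_32_bits(addr);
		*msb = IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) ?
			upper_32_bits(addr) : 0;
	}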
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 7f63f866083e..a978b960f1f1 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -483,11 +483,10 @@ static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
m_can_coalescing_disable(cdev);
m_can_write(cdev, M_CAN_ILE, 0x0);
- cdev->active_interrupts = 0x0;
if (!cdev->net->irq) {
dev_dbg(cdev->dev, "Stop hrtimer\n");
- hrtimer_cancel(&cdev->hrtimer);
+ hrtimer_try_to_cancel(&cdev->hrtimer);
}
}
@@ -1037,22 +1036,6 @@ end:
return work_done;
}
-static int m_can_rx_peripheral(struct net_device *dev, u32 irqstatus)
-{
- struct m_can_classdev *cdev = netdev_priv(dev);
- int work_done;
-
- work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, irqstatus);
-
- /* Don't re-enable interrupts if the driver had a fatal error
- * (e.g., FIFO read failure).
- */
- if (work_done < 0)
- m_can_disable_all_interrupts(cdev);
-
- return work_done;
-}
-
static int m_can_poll(struct napi_struct *napi, int quota)
{
struct net_device *dev = napi->dev;
@@ -1217,16 +1200,18 @@ static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir)
HRTIMER_MODE_REL);
}
-static irqreturn_t m_can_isr(int irq, void *dev_id)
+/* This interrupt handler is called either from the interrupt thread or a
+ * hrtimer. One implication is that cancelling the timer from here cannot
+ * be done with a blocking call.
+ */
+static int m_can_interrupt_handler(struct m_can_classdev *cdev)
{
- struct net_device *dev = (struct net_device *)dev_id;
- struct m_can_classdev *cdev = netdev_priv(dev);
+ struct net_device *dev = cdev->net;
u32 ir;
+ int ret;
- if (pm_runtime_suspended(cdev->dev)) {
- m_can_coalescing_disable(cdev);
+ if (pm_runtime_suspended(cdev->dev))
return IRQ_NONE;
- }
ir = m_can_read(cdev, M_CAN_IR);
m_can_coalescing_update(cdev, ir);
@@ -1250,11 +1235,9 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
m_can_disable_all_interrupts(cdev);
napi_schedule(&cdev->napi);
} else {
- int pkts;
-
- pkts = m_can_rx_peripheral(dev, ir);
- if (pkts < 0)
- goto out_fail;
+ ret = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, ir);
+ if (ret < 0)
+ return ret;
}
}
@@ -1272,8 +1255,9 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
} else {
if (ir & (IR_TEFN | IR_TEFW)) {
/* New TX FIFO Element arrived */
- if (m_can_echo_tx_event(dev) != 0)
- goto out_fail;
+ ret = m_can_echo_tx_event(dev);
+ if (ret != 0)
+ return ret;
}
}
@@ -1281,16 +1265,31 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
can_rx_offload_threaded_irq_finish(&cdev->offload);
return IRQ_HANDLED;
+}
-out_fail:
- m_can_disable_all_interrupts(cdev);
- return IRQ_HANDLED;
+static irqreturn_t m_can_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ int ret;
+
+ ret = m_can_interrupt_handler(cdev);
+ if (ret < 0) {
+ m_can_disable_all_interrupts(cdev);
+ return IRQ_HANDLED;
+ }
+
+ return ret;
}
static enum hrtimer_restart m_can_coalescing_timer(struct hrtimer *timer)
{
struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev, hrtimer);
+ if (cdev->can.state == CAN_STATE_BUS_OFF ||
+ cdev->can.state == CAN_STATE_STOPPED)
+ return HRTIMER_NORESTART;
+
irq_wake_thread(cdev->net->irq, cdev->net);
return HRTIMER_NORESTART;
@@ -1435,7 +1434,8 @@ static int m_can_chip_config(struct net_device *dev)
/* Disable unused interrupts */
interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TFE | IR_TCF |
- IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F);
+ IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F |
+ IR_TSW);
err = m_can_config_enable(cdev);
if (err)
@@ -1542,6 +1542,7 @@ static int m_can_chip_config(struct net_device *dev)
else
interrupts &= ~(IR_ERR_LEC_31X);
}
+ cdev->active_interrupts = 0;
m_can_interrupt_enable(cdev, interrupts);
/* route all interrupts to INT0 */
@@ -1763,11 +1764,7 @@ static int m_can_close(struct net_device *dev)
netif_stop_queue(dev);
- if (!cdev->is_peripheral)
- napi_disable(&cdev->napi);
-
m_can_stop(dev);
- m_can_clk_stop(cdev);
free_irq(dev->irq, dev);
m_can_clean(dev);
@@ -1776,10 +1773,13 @@ static int m_can_close(struct net_device *dev)
destroy_workqueue(cdev->tx_wq);
cdev->tx_wq = NULL;
can_rx_offload_disable(&cdev->offload);
+ } else {
+ napi_disable(&cdev->napi);
}
close_candev(dev);
+ m_can_clk_stop(cdev);
phy_power_off(cdev->transceiver);
return 0;
@@ -1991,8 +1991,17 @@ static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
{
struct m_can_classdev *cdev = container_of(timer, struct
m_can_classdev, hrtimer);
+ int ret;
+
+ if (cdev->can.state == CAN_STATE_BUS_OFF ||
+ cdev->can.state == CAN_STATE_STOPPED)
+ return HRTIMER_NORESTART;
- m_can_isr(0, cdev->net);
+ ret = m_can_interrupt_handler(cdev);
+
+ /* On error or if napi is scheduled to read, stop the timer */
+ if (ret < 0 || napi_is_scheduled(&cdev->napi))
+ return HRTIMER_NORESTART;
hrtimer_forward_now(timer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS));
@@ -2021,6 +2030,8 @@ static int m_can_open(struct net_device *dev)
if (cdev->is_peripheral)
can_rx_offload_enable(&cdev->offload);
+ else
+ napi_enable(&cdev->napi);
/* register interrupt handler */
if (cdev->is_peripheral) {
@@ -2052,21 +2063,23 @@ static int m_can_open(struct net_device *dev)
/* start the m_can controller */
err = m_can_start(dev);
if (err)
- goto exit_irq_fail;
-
- if (!cdev->is_peripheral)
- napi_enable(&cdev->napi);
+ goto exit_start_fail;
netif_start_queue(dev);
return 0;
+exit_start_fail:
+ if (cdev->is_peripheral || dev->irq)
+ free_irq(dev->irq, dev);
exit_irq_fail:
if (cdev->is_peripheral)
destroy_workqueue(cdev->tx_wq);
out_wq_fail:
if (cdev->is_peripheral)
can_rx_offload_disable(&cdev->offload);
+ else
+ napi_disable(&cdev->napi);
close_candev(dev);
exit_disable_clks:
m_can_clk_stop(cdev);
@@ -2172,7 +2185,7 @@ static int m_can_set_coalesce(struct net_device *dev,
return 0;
}
-static const struct ethtool_ops m_can_ethtool_ops = {
+static const struct ethtool_ops m_can_ethtool_ops_coalescing = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ |
ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ |
ETHTOOL_COALESCE_TX_USECS_IRQ |
@@ -2183,18 +2196,20 @@ static const struct ethtool_ops m_can_ethtool_ops = {
.set_coalesce = m_can_set_coalesce,
};
-static const struct ethtool_ops m_can_ethtool_ops_polling = {
+static const struct ethtool_ops m_can_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
};
-static int register_m_can_dev(struct net_device *dev)
+static int register_m_can_dev(struct m_can_classdev *cdev)
{
+ struct net_device *dev = cdev->net;
+
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &m_can_netdev_ops;
- if (dev->irq)
- dev->ethtool_ops = &m_can_ethtool_ops;
+ if (dev->irq && cdev->is_peripheral)
+ dev->ethtool_ops = &m_can_ethtool_ops_coalescing;
else
- dev->ethtool_ops = &m_can_ethtool_ops_polling;
+ dev->ethtool_ops = &m_can_ethtool_ops;
return register_candev(dev);
}
@@ -2380,7 +2395,7 @@ int m_can_class_register(struct m_can_classdev *cdev)
if (ret)
goto rx_offload_del;
- ret = register_m_can_dev(cdev->net);
+ ret = register_m_can_dev(cdev);
if (ret) {
dev_err(cdev->dev, "registering %s failed (err=%d)\n",
cdev->net->name, ret);
@@ -2427,12 +2442,15 @@ int m_can_class_suspend(struct device *dev)
netif_device_detach(ndev);
/* leave the chip running with rx interrupt enabled if it is
- * used as a wake-up source.
+ * used as a wake-up source. Coalescing then needs to be reset: the
+ * timer is cancelled here, the interrupts are restored in resume.
*/
- if (cdev->pm_wake_source)
+ if (cdev->pm_wake_source) {
+ hrtimer_cancel(&cdev->hrtimer);
m_can_write(cdev, M_CAN_IE, IR_RF0N);
- else
+ } else {
m_can_stop(ndev);
+ }
m_can_clk_stop(cdev);
}
@@ -2462,6 +2480,13 @@ int m_can_class_resume(struct device *dev)
return ret;
if (cdev->pm_wake_source) {
+ /* Restore active interrupts but disable coalescing as
+ * we may have missed important waterlevel interrupts
+ * between suspend and resume. Timers are already
+ * stopped in suspend. Here we enable all interrupts
+ * again.
+ */
+ cdev->active_interrupts |= IR_RF0N | IR_TEFN;
m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
} else {
ret = m_can_start(ndev);
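Two related moves in the m_can diff above: the interrupt work is factored into m_can_interrupt_handler() so the polling hrtimer can call it directly, and m_can_disable_all_interrupts() switches to hrtimer_try_to_cancel(), since hrtimer_cancel() blocks until a running callback finishes and so must not be called from the callback's own path. A sketch of the non-blocking cancel, with a hypothetical example_stop_polling():

	static void example_stop_polling(struct m_can_classdev *cdev)
	{
		/* Returns 0 (timer inactive), 1 (timer dequeued) or -1
		 * (callback running). In the -1 case the callback ends
		 * itself by returning HRTIMER_NORESTART once it observes
		 * CAN_STATE_STOPPED or CAN_STATE_BUS_OFF.
		 */
		hrtimer_try_to_cancel(&cdev->hrtimer);
	}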
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index 983ab80260dd..b832566efda0 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -231,7 +231,7 @@ static struct platform_driver m_can_plat_driver = {
.pm = &m_can_pmops,
},
.probe = m_can_plat_probe,
- .remove_new = m_can_plat_remove,
+ .remove = m_can_plat_remove,
};
module_platform_driver(m_can_plat_driver);
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 5b3d69c3b6b6..0080c39ee182 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -435,7 +435,7 @@ static struct platform_driver mpc5xxx_can_driver = {
.of_match_table = mpc5xxx_can_table,
},
.probe = mpc5xxx_can_probe,
- .remove_new = mpc5xxx_can_remove,
+ .remove = mpc5xxx_can_remove,
#ifdef CONFIG_PM
.suspend = mpc5xxx_can_suspend,
.resume = mpc5xxx_can_resume,
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index b50005397463..28f3fd805273 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -781,11 +781,8 @@ static int peak_get_ts_info(struct net_device *dev,
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF);
info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index f5aa5dbacaf2..2b7dd359f27b 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -907,7 +907,7 @@ static struct platform_driver rcar_can_driver = {
.pm = &rcar_can_pm_ops,
},
.probe = rcar_can_probe,
- .remove_new = rcar_can_remove,
+ .remove = rcar_can_remove,
};
module_platform_driver(rcar_can_driver);
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index c919668bbe7a..df1a5d0b37b2 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -2118,7 +2118,7 @@ static struct platform_driver rcar_canfd_driver = {
.pm = &rcar_canfd_pm_ops,
},
.probe = rcar_canfd_probe,
- .remove_new = rcar_canfd_remove,
+ .remove = rcar_canfd_remove,
};
module_platform_driver(rcar_canfd_driver);
diff --git a/drivers/net/can/rockchip/Kconfig b/drivers/net/can/rockchip/Kconfig
new file mode 100644
index 000000000000..e029e2a3ca4b
--- /dev/null
+++ b/drivers/net/can/rockchip/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config CAN_ROCKCHIP_CANFD
+ tristate "Rockchip CAN-FD controller"
+ depends on OF || COMPILE_TEST
+ select CAN_RX_OFFLOAD
+ help
+ Say Y here if you want to use the CAN-FD controller found on
+ Rockchip SoCs.
diff --git a/drivers/net/can/rockchip/Makefile b/drivers/net/can/rockchip/Makefile
new file mode 100644
index 000000000000..3760d3e1baa3
--- /dev/null
+++ b/drivers/net/can/rockchip/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CAN_ROCKCHIP_CANFD) += rockchip_canfd.o
+
+rockchip_canfd-objs :=
+rockchip_canfd-objs += rockchip_canfd-core.o
+rockchip_canfd-objs += rockchip_canfd-ethtool.o
+rockchip_canfd-objs += rockchip_canfd-rx.o
+rockchip_canfd-objs += rockchip_canfd-timestamp.o
+rockchip_canfd-objs += rockchip_canfd-tx.o
diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
new file mode 100644
index 000000000000..df18c85fc078
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
@@ -0,0 +1,967 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// Rockchip CANFD driver
+//
+// Copyright (c) 2020 Rockchip Electronics Co. Ltd.
+//
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/string.h>
+
+#include "rockchip_canfd.h"
+
+static const struct rkcanfd_devtype_data rkcanfd_devtype_data_rk3568v2 = {
+ .model = RKCANFD_MODEL_RK3568V2,
+ .quirks = RKCANFD_QUIRK_RK3568_ERRATUM_1 | RKCANFD_QUIRK_RK3568_ERRATUM_2 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_3 | RKCANFD_QUIRK_RK3568_ERRATUM_4 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_5 | RKCANFD_QUIRK_RK3568_ERRATUM_6 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_7 | RKCANFD_QUIRK_RK3568_ERRATUM_8 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_9 | RKCANFD_QUIRK_RK3568_ERRATUM_10 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_11 | RKCANFD_QUIRK_RK3568_ERRATUM_12 |
+ RKCANFD_QUIRK_CANFD_BROKEN,
+};
+
+/* The rk3568 CAN-FD errata sheet as of Tue 07 Nov 2023 11:25:31 +08:00
+ * states that only the rk3568v2 is affected by erratum 5, but tests
+ * with the rk3568v2 and rk3568v3 show that the RX_FIFO_CNT is
+ * sometimes too high. In contrast to the errata sheet, mark the
+ * rk3568v3 as affected by erratum 5, too.
+ */
+static const struct rkcanfd_devtype_data rkcanfd_devtype_data_rk3568v3 = {
+ .model = RKCANFD_MODEL_RK3568V3,
+ .quirks = RKCANFD_QUIRK_RK3568_ERRATUM_1 | RKCANFD_QUIRK_RK3568_ERRATUM_2 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_5 | RKCANFD_QUIRK_RK3568_ERRATUM_7 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_8 | RKCANFD_QUIRK_RK3568_ERRATUM_10 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_11 | RKCANFD_QUIRK_RK3568_ERRATUM_12 |
+ RKCANFD_QUIRK_CANFD_BROKEN,
+};
+
+static const char *__rkcanfd_get_model_str(enum rkcanfd_model model)
+{
+ switch (model) {
+ case RKCANFD_MODEL_RK3568V2:
+ return "rk3568v2";
+ case RKCANFD_MODEL_RK3568V3:
+ return "rk3568v3";
+ }
+
+ return "<unknown>";
+}
+
+static inline const char *
+rkcanfd_get_model_str(const struct rkcanfd_priv *priv)
+{
+ return __rkcanfd_get_model_str(priv->devtype_data.model);
+}
+
+/* Note:
+ *
+ * The formula to calculate the CAN System Clock is:
+ *
+ * Tsclk = 2 x Tclk x (brp + 1)
+ *
+ * Double the data sheet's brp_min, brp_max and brp_inc values (both
+ * for the arbitration and data bit timing) to take the "2 x" into
+ * account.
+ */
+static const struct can_bittiming_const rkcanfd_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 256,
+ .tseg2_min = 1,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 2, /* value from data sheet x2 */
+ .brp_max = 512, /* value from data sheet x2 */
+ .brp_inc = 2, /* value from data sheet x2 */
+};
+
+static const struct can_bittiming_const rkcanfd_data_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 2, /* value from data sheet x2 */
+ .brp_max = 512, /* value from data sheet x2 */
+ .brp_inc = 2, /* value from data sheet x2 */
+};
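+
+/* Worked example (illustrative): for a doubled brp of 8 the driver
+ * programs (8 / 2) - 1 = 3 into the BRP field, so
+ * Tsclk = 2 x Tclk x (3 + 1) = 8 x Tclk, i.e. the doubled brp is the
+ * effective prescaler in CAN clock periods.
+ */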
+
+static void rkcanfd_chip_set_reset_mode(const struct rkcanfd_priv *priv)
+{
+ reset_control_assert(priv->reset);
+ udelay(2);
+ reset_control_deassert(priv->reset);
+
+ rkcanfd_write(priv, RKCANFD_REG_MODE, 0x0);
+}
+
+static void rkcanfd_chip_set_work_mode(const struct rkcanfd_priv *priv)
+{
+ rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default);
+}
+
+static int rkcanfd_set_bittiming(struct rkcanfd_priv *priv)
+{
+ const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ u32 reg_nbt, reg_dbt, reg_tdc;
+ u32 tdco;
+
+ reg_nbt = FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_SJW,
+ bt->sjw - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_BRP,
+ (bt->brp / 2) - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG2,
+ bt->phase_seg2 - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG1,
+ bt->prop_seg + bt->phase_seg1 - 1);
+
+ rkcanfd_write(priv, RKCANFD_REG_FD_NOMINAL_BITTIMING, reg_nbt);
+
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
+ return 0;
+
+ reg_dbt = FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_SJW,
+ dbt->sjw - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_BRP,
+ (dbt->brp / 2) - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_TSEG2,
+ dbt->phase_seg2 - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_TSEG1,
+ dbt->prop_seg + dbt->phase_seg1 - 1);
+
+ rkcanfd_write(priv, RKCANFD_REG_FD_DATA_BITTIMING, reg_dbt);
+
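+ /* Place the secondary sample point at roughly 2/3 of a data
+ * bit time, expressed in CAN clock periods and clamped to the
+ * width of the TDC_OFFSET field.
+ */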
+ tdco = (priv->can.clock.freq / dbt->bitrate) * 2 / 3;
+ tdco = min(tdco, FIELD_MAX(RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET));
+
+ reg_tdc = FIELD_PREP(RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET, tdco) |
+ RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_ENABLE;
+ rkcanfd_write(priv, RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION,
+ reg_tdc);
+
+ return 0;
+}
+
+static void rkcanfd_get_berr_counter_corrected(struct rkcanfd_priv *priv,
+ struct can_berr_counter *bec)
+{
+ struct can_berr_counter bec_raw;
+ u32 reg_state;
+
+ bec->rxerr = rkcanfd_read(priv, RKCANFD_REG_RXERRORCNT);
+ bec->txerr = rkcanfd_read(priv, RKCANFD_REG_TXERRORCNT);
+ bec_raw = *bec;
+
+ /* Tests show that sometimes both CAN bus error counters read
+ * 0x0, even if the controller is in warning mode
+ * (RKCANFD_REG_STATE_ERROR_WARNING_STATE in RKCANFD_REG_STATE
+ * set).
+ *
+ * In case both error counters read 0x0, use the struct
+ * priv->bec, otherwise save the read value to priv->bec.
+ *
+ * rkcanfd_handle_rx_int_one() handles the decrementing of
+ * priv->bec.rxerr for successfully RX'ed CAN frames.
+ *
+ * Luckily the controller doesn't decrement the RX CAN bus
+ * error counter in hardware for self received TX'ed CAN
+ * frames (RKCANFD_REG_MODE_RXSTX_MODE), so RXSTX doesn't
+ * interfere with proper RX CAN bus error counters.
+ *
+ * rkcanfd_handle_tx_done_one() handles the decrementing of
+ * priv->bec.txerr for successfully TX'ed CAN frames.
+ */
+ if (!bec->rxerr && !bec->txerr)
+ *bec = priv->bec;
+ else
+ priv->bec = *bec;
+
+ reg_state = rkcanfd_read(priv, RKCANFD_REG_STATE);
+ netdev_vdbg(priv->ndev,
+ "%s: Raw/Cor: txerr=%3u/%3u rxerr=%3u/%3u Bus Off=%u Warning=%u\n",
+ __func__,
+ bec_raw.txerr, bec->txerr, bec_raw.rxerr, bec->rxerr,
+ !!(reg_state & RKCANFD_REG_STATE_BUS_OFF_STATE),
+ !!(reg_state & RKCANFD_REG_STATE_ERROR_WARNING_STATE));
+}
+
+static int rkcanfd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
+ if (err)
+ return err;
+
+ rkcanfd_get_berr_counter_corrected(priv, bec);
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+}
+
+static void rkcanfd_chip_interrupts_enable(const struct rkcanfd_priv *priv)
+{
+ rkcanfd_write(priv, RKCANFD_REG_INT_MASK, priv->reg_int_mask_default);
+
+ netdev_dbg(priv->ndev, "%s: reg_int_mask=0x%08x\n", __func__,
+ rkcanfd_read(priv, RKCANFD_REG_INT_MASK));
+}
+
+static void rkcanfd_chip_interrupts_disable(const struct rkcanfd_priv *priv)
+{
+ rkcanfd_write(priv, RKCANFD_REG_INT_MASK, RKCANFD_REG_INT_ALL);
+}
+
+static void rkcanfd_chip_fifo_setup(struct rkcanfd_priv *priv)
+{
+ u32 reg;
+
+ /* TXE FIFO */
+ reg = rkcanfd_read(priv, RKCANFD_REG_TXEVENT_FIFO_CTRL);
+ reg |= RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_ENABLE;
+ rkcanfd_write(priv, RKCANFD_REG_TXEVENT_FIFO_CTRL, reg);
+
+ /* RX FIFO */
+ reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL);
+ reg |= RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE;
+ rkcanfd_write(priv, RKCANFD_REG_RX_FIFO_CTRL, reg);
+
+ WRITE_ONCE(priv->tx_head, 0);
+ WRITE_ONCE(priv->tx_tail, 0);
+ netdev_reset_queue(priv->ndev);
+}
+
+static void rkcanfd_chip_start(struct rkcanfd_priv *priv)
+{
+ u32 reg;
+
+ rkcanfd_chip_set_reset_mode(priv);
+
+ /* Receiving Filter: accept all */
+ rkcanfd_write(priv, RKCANFD_REG_IDCODE, 0x0);
+ rkcanfd_write(priv, RKCANFD_REG_IDMASK, RKCANFD_REG_IDCODE_EXTENDED_FRAME_ID);
+
+ /* enable:
+ * - CAN_FD: enable CAN-FD
+ * - AUTO_RETX_MODE: auto retransmission on TX error
+ * - COVER_MODE: RX-FIFO overwrite mode, do not send OVERLOAD frames
+ * - RXSTX_MODE: Receive Self Transmit data mode
+ * - WORK_MODE: transition from reset to working mode
+ */
+ reg = rkcanfd_read(priv, RKCANFD_REG_MODE);
+ priv->reg_mode_default = reg |
+ RKCANFD_REG_MODE_CAN_FD_MODE_ENABLE |
+ RKCANFD_REG_MODE_AUTO_RETX_MODE |
+ RKCANFD_REG_MODE_COVER_MODE |
+ RKCANFD_REG_MODE_RXSTX_MODE |
+ RKCANFD_REG_MODE_WORK_MODE;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ priv->reg_mode_default |= RKCANFD_REG_MODE_LBACK_MODE |
+ RKCANFD_REG_MODE_SILENT_MODE |
+ RKCANFD_REG_MODE_SELF_TEST;
+
+ /* mask, i.e. ignore:
+ * - TIMESTAMP_COUNTER_OVERFLOW_INT - timestamp counter overflow interrupt
+ * - TX_ARBIT_FAIL_INT - TX arbitration fail interrupt
+ * - OVERLOAD_INT - CAN bus overload interrupt
+ * - TX_FINISH_INT - Transmit finish interrupt
+ */
+ priv->reg_int_mask_default =
+ RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT |
+ RKCANFD_REG_INT_TX_ARBIT_FAIL_INT |
+ RKCANFD_REG_INT_OVERLOAD_INT |
+ RKCANFD_REG_INT_TX_FINISH_INT;
+
+ /* Do not mask the bus error interrupt if the bus error
+ * reporting is requested.
+ */
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+ priv->reg_int_mask_default |= RKCANFD_REG_INT_ERROR_INT;
+
+ memset(&priv->bec, 0x0, sizeof(priv->bec));
+
+ rkcanfd_chip_fifo_setup(priv);
+ rkcanfd_timestamp_init(priv);
+ rkcanfd_timestamp_start(priv);
+
+ rkcanfd_set_bittiming(priv);
+
+ rkcanfd_chip_interrupts_disable(priv);
+ rkcanfd_chip_set_work_mode(priv);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ netdev_dbg(priv->ndev, "%s: reg_mode=0x%08x\n", __func__,
+ rkcanfd_read(priv, RKCANFD_REG_MODE));
+}
+
+static void __rkcanfd_chip_stop(struct rkcanfd_priv *priv, const enum can_state state)
+{
+ priv->can.state = state;
+
+ rkcanfd_chip_set_reset_mode(priv);
+ rkcanfd_chip_interrupts_disable(priv);
+}
+
+static void rkcanfd_chip_stop(struct rkcanfd_priv *priv, const enum can_state state)
+{
+ rkcanfd_timestamp_stop(priv);
+ __rkcanfd_chip_stop(priv, state);
+}
+
+static void rkcanfd_chip_stop_sync(struct rkcanfd_priv *priv, const enum can_state state)
+{
+ rkcanfd_timestamp_stop_sync(priv);
+ __rkcanfd_chip_stop(priv, state);
+}
+
+static int rkcanfd_set_mode(struct net_device *ndev,
+ enum can_mode mode)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ rkcanfd_chip_start(priv);
+ rkcanfd_chip_interrupts_enable(priv);
+ netif_wake_queue(ndev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *
+rkcanfd_alloc_can_err_skb(struct rkcanfd_priv *priv,
+ struct can_frame **cf, u32 *timestamp)
+{
+ struct sk_buff *skb;
+
+ *timestamp = rkcanfd_get_timestamp(priv);
+
+ skb = alloc_can_err_skb(priv->ndev, cf);
+ if (skb)
+ rkcanfd_skb_set_timestamp(priv, skb, *timestamp);
+
+ return skb;
+}
+
+static const char *rkcanfd_get_error_type_str(unsigned int type)
+{
+ switch (type) {
+ case RKCANFD_REG_ERROR_CODE_TYPE_BIT:
+ return "Bit";
+ case RKCANFD_REG_ERROR_CODE_TYPE_STUFF:
+ return "Stuff";
+ case RKCANFD_REG_ERROR_CODE_TYPE_FORM:
+ return "Form";
+ case RKCANFD_REG_ERROR_CODE_TYPE_ACK:
+ return "ACK";
+ case RKCANFD_REG_ERROR_CODE_TYPE_CRC:
+ return "CRC";
+ }
+
+ return "<unknown>";
+}
+
+#define RKCAN_ERROR_CODE(reg_ec, code) \
+ ((reg_ec) & RKCANFD_REG_ERROR_CODE_##code ? __stringify(code) " " : "")
+
+static void
+rkcanfd_handle_error_int_reg_ec(struct rkcanfd_priv *priv, struct can_frame *cf,
+ const u32 reg_ec)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ unsigned int type;
+ u32 reg_state, reg_cmd;
+
+ type = FIELD_GET(RKCANFD_REG_ERROR_CODE_TYPE, reg_ec);
+ reg_cmd = rkcanfd_read(priv, RKCANFD_REG_CMD);
+ reg_state = rkcanfd_read(priv, RKCANFD_REG_STATE);
+
+ netdev_dbg(priv->ndev, "%s Error in %s %s Phase: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s(0x%08x) CMD=%u RX=%u TX=%u Error-Warning=%u Bus-Off=%u\n",
+ rkcanfd_get_error_type_str(type),
+ reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX ? "RX" : "TX",
+ reg_ec & RKCANFD_REG_ERROR_CODE_PHASE ? "Data" : "Arbitration",
+ RKCAN_ERROR_CODE(reg_ec, TX_OVERLOAD),
+ RKCAN_ERROR_CODE(reg_ec, TX_ERROR),
+ RKCAN_ERROR_CODE(reg_ec, TX_ACK),
+ RKCAN_ERROR_CODE(reg_ec, TX_ACK_EOF),
+ RKCAN_ERROR_CODE(reg_ec, TX_CRC),
+ RKCAN_ERROR_CODE(reg_ec, TX_STUFF_COUNT),
+ RKCAN_ERROR_CODE(reg_ec, TX_DATA),
+ RKCAN_ERROR_CODE(reg_ec, TX_SOF_DLC),
+ RKCAN_ERROR_CODE(reg_ec, TX_IDLE),
+ RKCAN_ERROR_CODE(reg_ec, RX_BUF_INT),
+ RKCAN_ERROR_CODE(reg_ec, RX_SPACE),
+ RKCAN_ERROR_CODE(reg_ec, RX_EOF),
+ RKCAN_ERROR_CODE(reg_ec, RX_ACK_LIM),
+ RKCAN_ERROR_CODE(reg_ec, RX_ACK),
+ RKCAN_ERROR_CODE(reg_ec, RX_CRC_LIM),
+ RKCAN_ERROR_CODE(reg_ec, RX_CRC),
+ RKCAN_ERROR_CODE(reg_ec, RX_STUFF_COUNT),
+ RKCAN_ERROR_CODE(reg_ec, RX_DATA),
+ RKCAN_ERROR_CODE(reg_ec, RX_DLC),
+ RKCAN_ERROR_CODE(reg_ec, RX_BRS_ESI),
+ RKCAN_ERROR_CODE(reg_ec, RX_RES),
+ RKCAN_ERROR_CODE(reg_ec, RX_FDF),
+ RKCAN_ERROR_CODE(reg_ec, RX_ID2_RTR),
+ RKCAN_ERROR_CODE(reg_ec, RX_SOF_IDE),
+ RKCAN_ERROR_CODE(reg_ec, RX_IDLE),
+ reg_ec, reg_cmd,
+ !!(reg_state & RKCANFD_REG_STATE_RX_PERIOD),
+ !!(reg_state & RKCANFD_REG_STATE_TX_PERIOD),
+ !!(reg_state & RKCANFD_REG_STATE_ERROR_WARNING_STATE),
+ !!(reg_state & RKCANFD_REG_STATE_BUS_OFF_STATE));
+
+ priv->can.can_stats.bus_error++;
+
+ if (reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX)
+ stats->rx_errors++;
+ else
+ stats->tx_errors++;
+
+ if (!cf)
+ return;
+
+ if (reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX) {
+ if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_SOF_IDE)
+ cf->data[3] = CAN_ERR_PROT_LOC_SOF;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ID2_RTR)
+ cf->data[3] = CAN_ERR_PROT_LOC_RTR;
+ /* RKCANFD_REG_ERROR_CODE_RX_FDF */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_RES)
+ cf->data[3] = CAN_ERR_PROT_LOC_RES0;
+ /* RKCANFD_REG_ERROR_CODE_RX_BRS_ESI */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_DLC)
+ cf->data[3] = CAN_ERR_PROT_LOC_DLC;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_DATA)
+ cf->data[3] = CAN_ERR_PROT_LOC_DATA;
+ /* RKCANFD_REG_ERROR_CODE_RX_STUFF_COUNT */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_CRC)
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_CRC_LIM)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ACK)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ACK_LIM)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_EOF)
+ cf->data[3] = CAN_ERR_PROT_LOC_EOF;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_SPACE)
+ cf->data[3] = CAN_ERR_PROT_LOC_EOF;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_BUF_INT)
+ cf->data[3] = CAN_ERR_PROT_LOC_INTERM;
+ } else {
+ cf->data[2] |= CAN_ERR_PROT_TX;
+
+ if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_SOF_DLC)
+ cf->data[3] = CAN_ERR_PROT_LOC_SOF;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_DATA)
+ cf->data[3] = CAN_ERR_PROT_LOC_DATA;
+ /* RKCANFD_REG_ERROR_CODE_TX_STUFF_COUNT */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_CRC)
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_ACK_EOF)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_ACK)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ /* RKCANFD_REG_ERROR_CODE_TX_ERROR */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_OVERLOAD)
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ }
+
+ switch (reg_ec & RKCANFD_REG_ERROR_CODE_TYPE) {
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_BIT):
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_STUFF):
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_FORM):
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_ACK):
+ cf->can_id |= CAN_ERR_ACK;
+ break;
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_CRC):
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ break;
+ }
+}
+
+static int rkcanfd_handle_error_int(struct rkcanfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct can_frame *cf = NULL;
+ u32 reg_ec, timestamp;
+ struct sk_buff *skb;
+ int err;
+
+ reg_ec = rkcanfd_read(priv, RKCANFD_REG_ERROR_CODE);
+
+ if (!reg_ec)
+ return 0;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ if (cf) {
+ struct can_berr_counter bec;
+
+ rkcanfd_get_berr_counter_corrected(priv, &bec);
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ }
+
+ rkcanfd_handle_error_int_reg_ec(priv, cf, reg_ec);
+
+ if (!cf)
+ return 0;
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static int rkcanfd_handle_state_error_int(struct rkcanfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ enum can_state new_state, rx_state, tx_state;
+ struct net_device *ndev = priv->ndev;
+ struct can_berr_counter bec;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb;
+ u32 timestamp;
+ int err;
+
+ rkcanfd_get_berr_counter_corrected(priv, &bec);
+ can_state_get_by_berr_counter(ndev, &bec, &tx_state, &rx_state);
+
+ new_state = max(tx_state, rx_state);
+ if (new_state == priv->can.state)
+ return 0;
+
+ /* The skb allocation might fail, but can_change_state()
+ * handles cf == NULL.
+ */
+ skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ can_change_state(ndev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ rkcanfd_chip_stop(priv, CAN_STATE_BUS_OFF);
+ can_bus_off(ndev);
+ }
+
+ if (!skb)
+ return 0;
+
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static int
+rkcanfd_handle_rx_fifo_overflow_int(struct rkcanfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct can_berr_counter bec;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb;
+ u32 timestamp;
+ int err;
+
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ netdev_dbg(priv->ndev, "RX-FIFO overflow\n");
+
+ skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ if (!skb)
+ return 0;
+
+ rkcanfd_get_berr_counter_corrected(priv, &bec);
+
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
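+/* Run an IRQ sub-handler and loudly report, but otherwise ignore, any
+ * error it returns.
+ */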
+#define rkcanfd_handle(priv, irq, ...) \
+({ \
+ struct rkcanfd_priv *_priv = (priv); \
+ int err; \
+\
+ err = rkcanfd_handle_##irq(_priv, ## __VA_ARGS__); \
+ if (err) \
+ netdev_err(_priv->ndev, \
+ "IRQ handler rkcanfd_handle_%s() returned error: %pe\n", \
+ __stringify(irq), ERR_PTR(err)); \
+ err; \
+})
+
+static irqreturn_t rkcanfd_irq(int irq, void *dev_id)
+{
+ struct rkcanfd_priv *priv = dev_id;
+ u32 reg_int_unmasked, reg_int;
+
+ reg_int_unmasked = rkcanfd_read(priv, RKCANFD_REG_INT);
+ reg_int = reg_int_unmasked & ~priv->reg_int_mask_default;
+
+ if (!reg_int)
+ return IRQ_NONE;
+
+ /* First ACK, then handle, to avoid a lost-IRQ race on fast
+ * re-occurring interrupts: an interrupt that re-asserts after
+ * it was handled but before it was ACKed would be cleared
+ * without ever being processed.
+ */
+ rkcanfd_write(priv, RKCANFD_REG_INT, reg_int);
+
+ if (reg_int & RKCANFD_REG_INT_RX_FINISH_INT)
+ rkcanfd_handle(priv, rx_int);
+
+ if (reg_int & RKCANFD_REG_INT_ERROR_INT)
+ rkcanfd_handle(priv, error_int);
+
+ if (reg_int & (RKCANFD_REG_INT_BUS_OFF_INT |
+ RKCANFD_REG_INT_PASSIVE_ERROR_INT |
+ RKCANFD_REG_INT_ERROR_WARNING_INT) ||
+ priv->can.state > CAN_STATE_ERROR_ACTIVE)
+ rkcanfd_handle(priv, state_error_int);
+
+ if (reg_int & RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT)
+ rkcanfd_handle(priv, rx_fifo_overflow_int);
+
+ if (reg_int & ~(RKCANFD_REG_INT_ALL_ERROR |
+ RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT |
+ RKCANFD_REG_INT_RX_FINISH_INT))
+ netdev_err(priv->ndev, "%s: int=0x%08x\n", __func__, reg_int);
+
+ if (reg_int & RKCANFD_REG_INT_WAKEUP_INT)
+ netdev_info(priv->ndev, "%s: WAKEUP_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_TXE_FIFO_FULL_INT)
+ netdev_info(priv->ndev, "%s: TXE_FIFO_FULL_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_TXE_FIFO_OV_INT)
+ netdev_info(priv->ndev, "%s: TXE_FIFO_OV_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT)
+ netdev_info(priv->ndev, "%s: BUS_OFF_RECOVERY_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_RX_FIFO_FULL_INT)
+ netdev_info(priv->ndev, "%s: RX_FIFO_FULL_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_OVERLOAD_INT)
+ netdev_info(priv->ndev, "%s: OVERLOAD_INT\n", __func__);
+
+ can_rx_offload_irq_finish(&priv->offload);
+
+ return IRQ_HANDLED;
+}
+
+static int rkcanfd_open(struct net_device *ndev)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = open_candev(ndev);
+ if (err)
+ return err;
+
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
+ if (err)
+ goto out_close_candev;
+
+ rkcanfd_chip_start(priv);
+ can_rx_offload_enable(&priv->offload);
+
+ err = request_irq(ndev->irq, rkcanfd_irq, IRQF_SHARED, ndev->name, priv);
+ if (err)
+ goto out_rkcanfd_chip_stop;
+
+ rkcanfd_chip_interrupts_enable(priv);
+
+ netif_start_queue(ndev);
+
+ return 0;
+
+out_rkcanfd_chip_stop:
+ rkcanfd_chip_stop_sync(priv, CAN_STATE_STOPPED);
+ pm_runtime_put(ndev->dev.parent);
+out_close_candev:
+ close_candev(ndev);
+ return err;
+}
+
+static int rkcanfd_stop(struct net_device *ndev)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+
+ rkcanfd_chip_interrupts_disable(priv);
+ free_irq(ndev->irq, priv);
+ can_rx_offload_disable(&priv->offload);
+ rkcanfd_chip_stop_sync(priv, CAN_STATE_STOPPED);
+ close_candev(ndev);
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+}
+
+static const struct net_device_ops rkcanfd_netdev_ops = {
+ .ndo_open = rkcanfd_open,
+ .ndo_stop = rkcanfd_stop,
+ .ndo_start_xmit = rkcanfd_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static int __maybe_unused rkcanfd_runtime_suspend(struct device *dev)
+{
+ struct rkcanfd_priv *priv = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(priv->clks_num, priv->clks);
+
+ return 0;
+}
+
+static int __maybe_unused rkcanfd_runtime_resume(struct device *dev)
+{
+ struct rkcanfd_priv *priv = dev_get_drvdata(dev);
+
+ return clk_bulk_prepare_enable(priv->clks_num, priv->clks);
+}
+
+static void rkcanfd_register_done(const struct rkcanfd_priv *priv)
+{
+ u32 dev_id;
+
+ dev_id = rkcanfd_read(priv, RKCANFD_REG_RTL_VERSION);
+
+ netdev_info(priv->ndev,
+ "Rockchip-CANFD %s rev%lu.%lu (errata 0x%04x) found\n",
+ rkcanfd_get_model_str(priv),
+ FIELD_GET(RKCANFD_REG_RTL_VERSION_MAJOR, dev_id),
+ FIELD_GET(RKCANFD_REG_RTL_VERSION_MINOR, dev_id),
+ priv->devtype_data.quirks);
+
+ if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_5 &&
+ priv->can.clock.freq < RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN)
+ netdev_info(priv->ndev,
+ "Erratum 5: CAN clock frequency (%luMHz) lower than known good (%luMHz), expect degraded performance\n",
+ priv->can.clock.freq / MEGA,
+ RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN / MEGA);
+}
+
+static int rkcanfd_register(struct rkcanfd_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ int err;
+
+ pm_runtime_enable(ndev->dev.parent);
+
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
+ if (err)
+ goto out_pm_runtime_disable;
+
+ rkcanfd_ethtool_init(priv);
+
+ err = register_candev(ndev);
+ if (err)
+ goto out_pm_runtime_put_sync;
+
+ rkcanfd_register_done(priv);
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+
+out_pm_runtime_put_sync:
+ pm_runtime_put_sync(ndev->dev.parent);
+out_pm_runtime_disable:
+ pm_runtime_disable(ndev->dev.parent);
+
+ return err;
+}
+
+static inline void rkcanfd_unregister(struct rkcanfd_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+
+ unregister_candev(ndev);
+ pm_runtime_disable(ndev->dev.parent);
+}
+
+static const struct of_device_id rkcanfd_of_match[] = {
+ {
+ .compatible = "rockchip,rk3568v2-canfd",
+ .data = &rkcanfd_devtype_data_rk3568v2,
+ }, {
+ .compatible = "rockchip,rk3568v3-canfd",
+ .data = &rkcanfd_devtype_data_rk3568v3,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, rkcanfd_of_match);
+
+static int rkcanfd_probe(struct platform_device *pdev)
+{
+ struct rkcanfd_priv *priv;
+ struct net_device *ndev;
+ const void *match;
+ int err;
+
+ ndev = alloc_candev(sizeof(struct rkcanfd_priv), RKCANFD_TXFIFO_DEPTH);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ err = ndev->irq;
+ goto out_free_candev;
+ }
+
+ priv->clks_num = devm_clk_bulk_get_all(&pdev->dev, &priv->clks);
+ if (priv->clks_num < 0) {
+ err = priv->clks_num;
+ goto out_free_candev;
+ }
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->regs)) {
+ err = PTR_ERR(priv->regs);
+ goto out_free_candev;
+ }
+
+ priv->reset = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(priv->reset)) {
+ err = dev_err_probe(&pdev->dev, PTR_ERR(priv->reset),
+ "Failed to get reset line\n");
+ goto out_free_candev;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ndev->netdev_ops = &rkcanfd_netdev_ops;
+ ndev->flags |= IFF_ECHO;
+
+ platform_set_drvdata(pdev, priv);
+ priv->can.clock.freq = clk_get_rate(priv->clks[0].clk);
+ priv->can.bittiming_const = &rkcanfd_bittiming_const;
+ priv->can.data_bittiming_const = &rkcanfd_data_bittiming_const;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_BERR_REPORTING;
+ if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_CANFD_BROKEN))
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ priv->can.do_set_mode = rkcanfd_set_mode;
+ priv->can.do_get_berr_counter = rkcanfd_get_berr_counter;
+ priv->ndev = ndev;
+
+ match = device_get_match_data(&pdev->dev);
+ if (match)
+ priv->devtype_data = *(struct rkcanfd_devtype_data *)match;
+
+ err = can_rx_offload_add_manual(ndev, &priv->offload,
+ RKCANFD_NAPI_WEIGHT);
+ if (err)
+ goto out_free_candev;
+
+ err = rkcanfd_register(priv);
+ if (err)
+ goto out_can_rx_offload_del;
+
+ return 0;
+
+out_can_rx_offload_del:
+ can_rx_offload_del(&priv->offload);
+out_free_candev:
+ free_candev(ndev);
+
+ return err;
+}
+
+static void rkcanfd_remove(struct platform_device *pdev)
+{
+ struct rkcanfd_priv *priv = platform_get_drvdata(pdev);
+ struct net_device *ndev = priv->ndev;
+
+ can_rx_offload_del(&priv->offload);
+ rkcanfd_unregister(priv);
+ free_candev(ndev);
+}
+
+static const struct dev_pm_ops rkcanfd_pm_ops = {
+ SET_RUNTIME_PM_OPS(rkcanfd_runtime_suspend,
+ rkcanfd_runtime_resume, NULL)
+};
+
+static struct platform_driver rkcanfd_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .pm = &rkcanfd_pm_ops,
+ .of_match_table = rkcanfd_of_match,
+ },
+ .probe = rkcanfd_probe,
+ .remove = rkcanfd_remove,
+};
+module_platform_driver(rkcanfd_driver);
+
+MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
+MODULE_DESCRIPTION("Rockchip CAN-FD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/rockchip/rockchip_canfd-ethtool.c b/drivers/net/can/rockchip/rockchip_canfd-ethtool.c
new file mode 100644
index 000000000000..5aeeef64a67a
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-ethtool.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <linux/ethtool.h>
+
+#include "rockchip_canfd.h"
+
+enum rkcanfd_stats_type {
+ RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS,
+ RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS,
+};
+
+static const char rkcanfd_stats_strings[][ETH_GSTRING_LEN] = {
+ [RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS] = "rx_fifo_empty_errors",
+ [RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS] = "tx_extended_as_standard_errors",
+};
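+
+/* These driver-private counters are exposed via the ethtool stats API
+ * and can be read from userspace with e.g. "ethtool -S can0" (the
+ * interface name is an example).
+ */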
+
+static void
+rkcanfd_ethtool_get_strings(struct net_device *ndev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, rkcanfd_stats_strings,
+ sizeof(rkcanfd_stats_strings));
+ }
+}
+
+static int rkcanfd_ethtool_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(rkcanfd_stats_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+rkcanfd_ethtool_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+ struct rkcanfd_stats *rkcanfd_stats;
+ unsigned int start;
+
+ rkcanfd_stats = &priv->stats;
+
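+ /* Snapshot the 64 bit counters; retry if a writer updated
+ * them concurrently.
+ */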
+ do {
+ start = u64_stats_fetch_begin(&rkcanfd_stats->syncp);
+
+ data[RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS] =
+ u64_stats_read(&rkcanfd_stats->rx_fifo_empty_errors);
+ data[RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS] =
+ u64_stats_read(&rkcanfd_stats->tx_extended_as_standard_errors);
+ } while (u64_stats_fetch_retry(&rkcanfd_stats->syncp, start));
+}
+
+static const struct ethtool_ops rkcanfd_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+ .get_strings = rkcanfd_ethtool_get_strings,
+ .get_sset_count = rkcanfd_ethtool_get_sset_count,
+ .get_ethtool_stats = rkcanfd_ethtool_get_ethtool_stats,
+};
+
+void rkcanfd_ethtool_init(struct rkcanfd_priv *priv)
+{
+ priv->ndev->ethtool_ops = &rkcanfd_ethtool_ops;
+
+ u64_stats_init(&priv->stats.syncp);
+}
diff --git a/drivers/net/can/rockchip/rockchip_canfd-rx.c b/drivers/net/can/rockchip/rockchip_canfd-rx.c
new file mode 100644
index 000000000000..475c0409e215
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-rx.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <net/netdev_queues.h>
+
+#include "rockchip_canfd.h"
+
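+/* Compare the headers of two CAN(-FD) frames, taking only the fields
+ * valid for the given frame type into account. Used to match a
+ * self-received frame against the pending TX frame.
+ */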
+static bool rkcanfd_can_frame_header_equal(const struct canfd_frame *const cfd1,
+ const struct canfd_frame *const cfd2,
+ const bool is_canfd)
+{
+ const u8 mask_flags = CANFD_BRS | CANFD_ESI | CANFD_FDF;
+ canid_t mask = CAN_EFF_FLAG;
+
+ if (canfd_sanitize_len(cfd1->len) != canfd_sanitize_len(cfd2->len))
+ return false;
+
+ if (!is_canfd)
+ mask |= CAN_RTR_FLAG;
+
+ if (cfd1->can_id & CAN_EFF_FLAG)
+ mask |= CAN_EFF_MASK;
+ else
+ mask |= CAN_SFF_MASK;
+
+ if ((cfd1->can_id & mask) != (cfd2->can_id & mask))
+ return false;
+
+ if (is_canfd &&
+ (cfd1->flags & mask_flags) != (cfd2->flags & mask_flags))
+ return false;
+
+ return true;
+}
+
+static bool rkcanfd_can_frame_data_equal(const struct canfd_frame *cfd1,
+ const struct canfd_frame *cfd2,
+ const bool is_canfd)
+{
+ u8 len;
+
+ if (!is_canfd && (cfd1->can_id & CAN_RTR_FLAG))
+ return true;
+
+ len = canfd_sanitize_len(cfd1->len);
+
+ return !memcmp(cfd1->data, cfd2->data, len);
+}
+
+static unsigned int
+rkcanfd_fifo_header_to_cfd_header(const struct rkcanfd_priv *priv,
+ const struct rkcanfd_fifo_header *header,
+ struct canfd_frame *cfd)
+{
+ unsigned int len = sizeof(*cfd) - sizeof(cfd->data);
+ u8 dlc;
+
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT)
+ cfd->can_id = FIELD_GET(RKCANFD_REG_FD_ID_EFF, header->id) |
+ CAN_EFF_FLAG;
+ else
+ cfd->can_id = FIELD_GET(RKCANFD_REG_FD_ID_SFF, header->id);
+
+ dlc = FIELD_GET(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH,
+ header->frameinfo);
+
+ /* CAN-FD */
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF) {
+ cfd->len = can_fd_dlc2len(dlc);
+
+ /* The cfd is not allocated by alloc_canfd_skb(), so
+ * set CANFD_FDF here.
+ */
+ cfd->flags |= CANFD_FDF;
+
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_BRS)
+ cfd->flags |= CANFD_BRS;
+ } else {
+ cfd->len = can_cc_dlc2len(dlc);
+
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_RTR) {
+ cfd->can_id |= CAN_RTR_FLAG;
+
+ return len;
+ }
+ }
+
+ return len + cfd->len;
+}
+
+static int rkcanfd_rxstx_filter(struct rkcanfd_priv *priv,
+ const struct canfd_frame *cfd_rx, const u32 ts,
+ bool *tx_done)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct rkcanfd_stats *rkcanfd_stats = &priv->stats;
+ const struct canfd_frame *cfd_nominal;
+ const struct sk_buff *skb;
+ unsigned int tx_tail;
+
+ tx_tail = rkcanfd_get_tx_tail(priv);
+ skb = priv->can.echo_skb[tx_tail];
+ if (!skb) {
+ netdev_err(priv->ndev,
+ "%s: echo_skb[%u]=NULL tx_head=0x%08x tx_tail=0x%08x\n",
+ __func__, tx_tail,
+ priv->tx_head, priv->tx_tail);
+
+ return -ENOMSG;
+ }
+ cfd_nominal = (struct canfd_frame *)skb->data;
+
+ /* With RXSTX_MODE enabled every TX'ed frame is received again.
+ * RX'ing a frame identical to our pending TX frame therefore
+ * signals that the transmission has completed.
+ */
+ if (rkcanfd_can_frame_header_equal(cfd_rx, cfd_nominal,
+ cfd_rx->flags & CANFD_FDF) &&
+ rkcanfd_can_frame_data_equal(cfd_rx, cfd_nominal,
+ cfd_rx->flags & CANFD_FDF)) {
+ unsigned int frame_len;
+
+ rkcanfd_handle_tx_done_one(priv, ts, &frame_len);
+
+ WRITE_ONCE(priv->tx_tail, priv->tx_tail + 1);
+ netif_subqueue_completed_wake(priv->ndev, 0, 1, frame_len,
+ rkcanfd_get_effective_tx_free(priv),
+ RKCANFD_TX_START_THRESHOLD);
+
+ *tx_done = true;
+
+ return 0;
+ }
+
+ if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_6))
+ return 0;
+
+ /* Erratum 6: Extended frames may be sent as standard frames.
+ *
+ * Not affected if:
+ * - TX'ed a standard frame -or-
+ * - RX'ed an extended frame
+ */
+ if (!(cfd_nominal->can_id & CAN_EFF_FLAG) ||
+ (cfd_rx->can_id & CAN_EFF_FLAG))
+ return 0;
+
+ /* Not affected if:
+ * - the standard part and RTR flag of the TX'ed frame are not
+ * equal to the CAN-ID and RTR flag of the RX'ed frame.
+ */
+ if ((cfd_nominal->can_id & (CAN_RTR_FLAG | CAN_SFF_MASK)) !=
+ (cfd_rx->can_id & (CAN_RTR_FLAG | CAN_SFF_MASK)))
+ return 0;
+
+ /* Not affected if:
+ * - length is not the same
+ */
+ if (cfd_nominal->len != cfd_rx->len)
+ return 0;
+
+ /* Not affected if:
+ * - the data of non RTR frames is different
+ */
+ if (!(cfd_nominal->can_id & CAN_RTR_FLAG) &&
+ memcmp(cfd_nominal->data, cfd_rx->data, cfd_nominal->len))
+ return 0;
+
+ /* Affected by Erratum 6 */
+ u64_stats_update_begin(&rkcanfd_stats->syncp);
+ u64_stats_inc(&rkcanfd_stats->tx_extended_as_standard_errors);
+ u64_stats_update_end(&rkcanfd_stats->syncp);
+
+ /* Manual handling of CAN Bus Error counters. See
+ * rkcanfd_get_corrected_berr_counter() for detailed
+ * explanation.
+ */
+ if (priv->bec.txerr)
+ priv->bec.txerr--;
+
+ *tx_done = true;
+
+ stats->tx_packets++;
+ stats->tx_errors++;
+
+ rkcanfd_xmit_retry(priv);
+
+ return 0;
+}
+
+static inline bool
+rkcanfd_fifo_header_empty(const struct rkcanfd_fifo_header *header)
+{
+ /* Erratum 5: If the FIFO is empty, we read the same value for
+ * all elements.
+ */
+ return header->frameinfo == header->id &&
+ header->frameinfo == header->ts;
+}
+
+static int rkcanfd_handle_rx_int_one(struct rkcanfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct canfd_frame cfd[1] = { }, *skb_cfd;
+ struct rkcanfd_fifo_header header[1] = { };
+ struct sk_buff *skb;
+ unsigned int len;
+ int err;
+
+ /* read header into separate struct and convert it later */
+ rkcanfd_read_rep(priv, RKCANFD_REG_RX_FIFO_RDATA,
+ header, sizeof(*header));
+ /* read data directly into cfd */
+ rkcanfd_read_rep(priv, RKCANFD_REG_RX_FIFO_RDATA,
+ cfd->data, sizeof(cfd->data));
+
+ /* Erratum 5: Counters for TXEFIFO and RXFIFO may be wrong */
+ if (rkcanfd_fifo_header_empty(header)) {
+ struct rkcanfd_stats *rkcanfd_stats = &priv->stats;
+
+ u64_stats_update_begin(&rkcanfd_stats->syncp);
+ u64_stats_inc(&rkcanfd_stats->rx_fifo_empty_errors);
+ u64_stats_update_end(&rkcanfd_stats->syncp);
+
+ return 0;
+ }
+
+ len = rkcanfd_fifo_header_to_cfd_header(priv, header, cfd);
+
+ /* Drop any received CAN-FD frames if CAN-FD mode is not
+ * requested.
+ */
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF &&
+ !(priv->can.ctrlmode & CAN_CTRLMODE_FD)) {
+ stats->rx_dropped++;
+
+ return 0;
+ }
+
+ if (rkcanfd_get_tx_pending(priv)) {
+ bool tx_done = false;
+
+ err = rkcanfd_rxstx_filter(priv, cfd, header->ts, &tx_done);
+ if (err)
+ return err;
+ if (tx_done && !(priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK))
+ return 0;
+ }
+
+ /* Manual handling of CAN Bus Error counters. See
+ * rkcanfd_get_corrected_berr_counter() for detailed
+ * explanation.
+ */
+ if (priv->bec.rxerr)
+ priv->bec.rxerr = min(CAN_ERROR_PASSIVE_THRESHOLD,
+ priv->bec.rxerr) - 1;
+
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF)
+ skb = alloc_canfd_skb(priv->ndev, &skb_cfd);
+ else
+ skb = alloc_can_skb(priv->ndev, (struct can_frame **)&skb_cfd);
+
+ if (!skb) {
+ stats->rx_dropped++;
+
+ return 0;
+ }
+
+ memcpy(skb_cfd, cfd, len);
+ rkcanfd_skb_set_timestamp(priv, skb, header->ts);
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, header->ts);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static inline unsigned int
+rkcanfd_rx_fifo_get_len(const struct rkcanfd_priv *priv)
+{
+ const u32 reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL);
+
+ return FIELD_GET(RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_CNT, reg);
+}
+
+int rkcanfd_handle_rx_int(struct rkcanfd_priv *priv)
+{
+ unsigned int len;
+ int err;
+
+ while ((len = rkcanfd_rx_fifo_get_len(priv))) {
+ err = rkcanfd_handle_rx_int_one(priv);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/can/rockchip/rockchip_canfd-timestamp.c b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
new file mode 100644
index 000000000000..43d4b5721812
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <linux/clocksource.h>
+
+#include "rockchip_canfd.h"
+
+static u64 rkcanfd_timestamp_read(const struct cyclecounter *cc)
+{
+ const struct rkcanfd_priv *priv = container_of(cc, struct rkcanfd_priv, cc);
+
+ return rkcanfd_get_timestamp(priv);
+}
+
+void rkcanfd_skb_set_timestamp(const struct rkcanfd_priv *priv,
+ struct sk_buff *skb, const u32 timestamp)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ u64 ns;
+
+ ns = timecounter_cyc2time(&priv->tc, timestamp);
+
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
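+/* Periodically read the timecounter so that its cached cycle value is
+ * refreshed before the 32 bit hardware counter can wrap unnoticed.
+ */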
+static void rkcanfd_timestamp_work(struct work_struct *work)
+{
+ const struct delayed_work *delayed_work = to_delayed_work(work);
+ struct rkcanfd_priv *priv;
+
+ priv = container_of(delayed_work, struct rkcanfd_priv, timestamp);
+ timecounter_read(&priv->tc);
+
+ schedule_delayed_work(&priv->timestamp, priv->work_delay_jiffies);
+}
+
+void rkcanfd_timestamp_init(struct rkcanfd_priv *priv)
+{
+ const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ struct cyclecounter *cc = &priv->cc;
+ u32 bitrate, div, reg, rate;
+ u64 work_delay_ns;
+ u64 max_cycles;
+
+ /* At the standard clock rate of 300 MHz on the rk3568, the 32
+ * bit timer overflows every 14s. This means that we have to
+ * poll it quite often to avoid missing a wrap around.
+ *
+ * Divide it down to a reasonable rate, at least twice the bit
+ * rate.
+ */
+ bitrate = max(bt->bitrate, dbt->bitrate);
+ div = min(DIV_ROUND_UP(priv->can.clock.freq, bitrate * 2),
+ FIELD_MAX(RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE) + 1);
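+ /* Worked example (assuming the typical 300 MHz clock and a
+ * 1 Mbit/s bitrate): DIV_ROUND_UP(300 MHz, 2 MHz) = 150 is
+ * clamped to the maximum divider of 32, so the counter runs
+ * at 300 MHz / 32 = 9.375 MHz and wraps roughly every 458
+ * seconds instead of every 14 seconds.
+ */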
+
+ reg = FIELD_PREP(RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE,
+ div - 1) |
+ RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_ENABLE;
+ rkcanfd_write(priv, RKCANFD_REG_TIMESTAMP_CTRL, reg);
+
+ cc->read = rkcanfd_timestamp_read;
+ cc->mask = CYCLECOUNTER_MASK(32);
+
+ rate = priv->can.clock.freq / div;
+ clocks_calc_mult_shift(&cc->mult, &cc->shift, rate, NSEC_PER_SEC,
+ RKCANFD_TIMESTAMP_WORK_MAX_DELAY_SEC);
+
+ max_cycles = div_u64(ULLONG_MAX, cc->mult);
+ max_cycles = min(max_cycles, cc->mask);
+ work_delay_ns = clocksource_cyc2ns(max_cycles, cc->mult, cc->shift);
+ priv->work_delay_jiffies = div_u64(work_delay_ns, 3u * NSEC_PER_SEC / HZ);
+ INIT_DELAYED_WORK(&priv->timestamp, rkcanfd_timestamp_work);
+
+ netdev_dbg(priv->ndev, "clock=%lu.%02luMHz bitrate=%lu.%02luMBit/s div=%u rate=%lu.%02luMHz mult=%u shift=%u delay=%lus\n",
+ priv->can.clock.freq / MEGA,
+ priv->can.clock.freq % MEGA / KILO / 10,
+ bitrate / MEGA,
+ bitrate % MEGA / KILO / 100,
+ div,
+ rate / MEGA,
+ rate % MEGA / KILO / 10,
+ cc->mult, cc->shift,
+ priv->work_delay_jiffies / HZ);
+}
+
+void rkcanfd_timestamp_start(struct rkcanfd_priv *priv)
+{
+ timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
+
+ schedule_delayed_work(&priv->timestamp, priv->work_delay_jiffies);
+}
+
+void rkcanfd_timestamp_stop(struct rkcanfd_priv *priv)
+{
+ cancel_delayed_work(&priv->timestamp);
+}
+
+void rkcanfd_timestamp_stop_sync(struct rkcanfd_priv *priv)
+{
+ cancel_delayed_work_sync(&priv->timestamp);
+}
diff --git a/drivers/net/can/rockchip/rockchip_canfd-tx.c b/drivers/net/can/rockchip/rockchip_canfd-tx.c
new file mode 100644
index 000000000000..865a15e033a9
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-tx.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <net/netdev_queues.h>
+
+#include "rockchip_canfd.h"
+
+static bool rkcanfd_tx_tail_is_eff(const struct rkcanfd_priv *priv)
+{
+ const struct canfd_frame *cfd;
+ const struct sk_buff *skb;
+ unsigned int tx_tail;
+
+ if (!rkcanfd_get_tx_pending(priv))
+ return false;
+
+ tx_tail = rkcanfd_get_tx_tail(priv);
+ skb = priv->can.echo_skb[tx_tail];
+ if (!skb) {
+ netdev_err(priv->ndev,
+ "%s: echo_skb[%u]=NULL tx_head=0x%08x tx_tail=0x%08x\n",
+ __func__, tx_tail,
+ priv->tx_head, priv->tx_tail);
+
+ return false;
+ }
+
+ cfd = (struct canfd_frame *)skb->data;
+
+ return cfd->can_id & CAN_EFF_FLAG;
+}
+
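+/* Erratum 6 workaround: while an extended frame is pending in the TX
+ * FIFO report no free slots, so that at most one EFF frame is in
+ * flight at a time.
+ */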
+unsigned int rkcanfd_get_effective_tx_free(const struct rkcanfd_priv *priv)
+{
+ if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_6 &&
+ rkcanfd_tx_tail_is_eff(priv))
+ return 0;
+
+ return rkcanfd_get_tx_free(priv);
+}
+
+static void rkcanfd_start_xmit_write_cmd(const struct rkcanfd_priv *priv,
+ const u32 reg_cmd)
+{
+ if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_12)
+ rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default |
+ RKCANFD_REG_MODE_SPACE_RX_MODE);
+
+ rkcanfd_write(priv, RKCANFD_REG_CMD, reg_cmd);
+
+ if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_12)
+ rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default);
+}
+
+void rkcanfd_xmit_retry(struct rkcanfd_priv *priv)
+{
+ const unsigned int tx_head = rkcanfd_get_tx_head(priv);
+ const u32 reg_cmd = RKCANFD_REG_CMD_TX_REQ(tx_head);
+
+ rkcanfd_start_xmit_write_cmd(priv, reg_cmd);
+}
+
+netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+ u32 reg_frameinfo, reg_id, reg_cmd;
+ unsigned int tx_head, frame_len;
+ const struct canfd_frame *cfd;
+ int err;
+ u8 i;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
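+ /* Flow control: stop the TX queue if the (erratum 6 adjusted)
+ * number of free TX slots drops below the stop threshold.
+ */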
+ if (!netif_subqueue_maybe_stop(priv->ndev, 0,
+ rkcanfd_get_effective_tx_free(priv),
+ RKCANFD_TX_STOP_THRESHOLD,
+ RKCANFD_TX_START_THRESHOLD)) {
+ if (net_ratelimit())
+ netdev_info(priv->ndev,
+ "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, tx_pending=%d)\n",
+ priv->tx_head, priv->tx_tail,
+ rkcanfd_get_tx_pending(priv));
+
+ return NETDEV_TX_BUSY;
+ }
+
+ cfd = (struct canfd_frame *)skb->data;
+
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ reg_frameinfo = RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT;
+ reg_id = FIELD_PREP(RKCANFD_REG_FD_ID_EFF, cfd->can_id);
+ } else {
+ reg_frameinfo = 0;
+ reg_id = FIELD_PREP(RKCANFD_REG_FD_ID_SFF, cfd->can_id);
+ }
+
+ if (cfd->can_id & CAN_RTR_FLAG)
+ reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_RTR;
+
+ if (can_is_canfd_skb(skb)) {
+ reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_FDF;
+
+ if (cfd->flags & CANFD_BRS)
+ reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_BRS;
+
+ reg_frameinfo |= FIELD_PREP(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH,
+ can_fd_len2dlc(cfd->len));
+ } else {
+ reg_frameinfo |= FIELD_PREP(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH,
+ cfd->len);
+ }
+
+ tx_head = rkcanfd_get_tx_head(priv);
+ reg_cmd = RKCANFD_REG_CMD_TX_REQ(tx_head);
+
+ rkcanfd_write(priv, RKCANFD_REG_FD_TXFRAMEINFO, reg_frameinfo);
+ rkcanfd_write(priv, RKCANFD_REG_FD_TXID, reg_id);
+ for (i = 0; i < cfd->len; i += 4)
+ rkcanfd_write(priv, RKCANFD_REG_FD_TXDATA0 + i,
+ *(u32 *)(cfd->data + i));
+
+ frame_len = can_skb_get_frame_len(skb);
+ err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
+ if (!err)
+ netdev_sent_queue(priv->ndev, frame_len);
+
+ WRITE_ONCE(priv->tx_head, priv->tx_head + 1);
+
+ rkcanfd_start_xmit_write_cmd(priv, reg_cmd);
+
+ netif_subqueue_maybe_stop(priv->ndev, 0,
+ rkcanfd_get_effective_tx_free(priv),
+ RKCANFD_TX_STOP_THRESHOLD,
+ RKCANFD_TX_START_THRESHOLD);
+
+ return NETDEV_TX_OK;
+}
+
+void rkcanfd_handle_tx_done_one(struct rkcanfd_priv *priv, const u32 ts,
+ unsigned int *frame_len_p)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ unsigned int tx_tail;
+ struct sk_buff *skb;
+
+ tx_tail = rkcanfd_get_tx_tail(priv);
+ skb = priv->can.echo_skb[tx_tail];
+
+ /* Manual handling of CAN Bus Error counters. See
+ * rkcanfd_get_corrected_berr_counter() for detailed
+ * explanation.
+ */
+ if (priv->bec.txerr)
+ priv->bec.txerr--;
+
+ if (skb)
+ rkcanfd_skb_set_timestamp(priv, skb, ts);
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload,
+ tx_tail, ts,
+ frame_len_p);
+ stats->tx_packets++;
+}
diff --git a/drivers/net/can/rockchip/rockchip_canfd.h b/drivers/net/can/rockchip/rockchip_canfd.h
new file mode 100644
index 000000000000..93131c7d7f54
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2023, 2024 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _ROCKCHIP_CANFD_H
+#define _ROCKCHIP_CANFD_H
+
+#include <linux/bitfield.h>
+#include <linux/can/dev.h>
+#include <linux/can/rx-offload.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/reset.h>
+#include <linux/skbuff.h>
+#include <linux/timecounter.h>
+#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/units.h>
+
+#define RKCANFD_REG_MODE 0x000
+#define RKCANFD_REG_MODE_CAN_FD_MODE_ENABLE BIT(15)
+#define RKCANFD_REG_MODE_DPEE BIT(14)
+#define RKCANFD_REG_MODE_BRSD BIT(13)
+#define RKCANFD_REG_MODE_SPACE_RX_MODE BIT(12)
+#define RKCANFD_REG_MODE_AUTO_BUS_ON BIT(11)
+#define RKCANFD_REG_MODE_AUTO_RETX_MODE BIT(10)
+#define RKCANFD_REG_MODE_OVLD_MODE BIT(9)
+#define RKCANFD_REG_MODE_COVER_MODE BIT(8)
+#define RKCANFD_REG_MODE_RXSORT_MODE BIT(7)
+#define RKCANFD_REG_MODE_TXORDER_MODE BIT(6)
+#define RKCANFD_REG_MODE_RXSTX_MODE BIT(5)
+#define RKCANFD_REG_MODE_LBACK_MODE BIT(4)
+#define RKCANFD_REG_MODE_SILENT_MODE BIT(3)
+#define RKCANFD_REG_MODE_SELF_TEST BIT(2)
+#define RKCANFD_REG_MODE_SLEEP_MODE BIT(1)
+#define RKCANFD_REG_MODE_WORK_MODE BIT(0)
+
+#define RKCANFD_REG_CMD 0x004
+#define RKCANFD_REG_CMD_TX1_REQ BIT(1)
+#define RKCANFD_REG_CMD_TX0_REQ BIT(0)
+#define RKCANFD_REG_CMD_TX_REQ(i) (RKCANFD_REG_CMD_TX0_REQ << (i))
+
+#define RKCANFD_REG_STATE 0x008
+#define RKCANFD_REG_STATE_SLEEP_STATE BIT(6)
+#define RKCANFD_REG_STATE_BUS_OFF_STATE BIT(5)
+#define RKCANFD_REG_STATE_ERROR_WARNING_STATE BIT(4)
+#define RKCANFD_REG_STATE_TX_PERIOD BIT(3)
+#define RKCANFD_REG_STATE_RX_PERIOD BIT(2)
+#define RKCANFD_REG_STATE_TX_BUFFER_FULL BIT(1)
+#define RKCANFD_REG_STATE_RX_BUFFER_FULL BIT(0)
+
+#define RKCANFD_REG_INT 0x00c
+#define RKCANFD_REG_INT_WAKEUP_INT BIT(14)
+#define RKCANFD_REG_INT_TXE_FIFO_FULL_INT BIT(13)
+#define RKCANFD_REG_INT_TXE_FIFO_OV_INT BIT(12)
+#define RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT BIT(11)
+#define RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT BIT(10)
+#define RKCANFD_REG_INT_BUS_OFF_INT BIT(9)
+#define RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT BIT(8)
+#define RKCANFD_REG_INT_RX_FIFO_FULL_INT BIT(7)
+#define RKCANFD_REG_INT_ERROR_INT BIT(6)
+#define RKCANFD_REG_INT_TX_ARBIT_FAIL_INT BIT(5)
+#define RKCANFD_REG_INT_PASSIVE_ERROR_INT BIT(4)
+#define RKCANFD_REG_INT_OVERLOAD_INT BIT(3)
+#define RKCANFD_REG_INT_ERROR_WARNING_INT BIT(2)
+#define RKCANFD_REG_INT_TX_FINISH_INT BIT(1)
+#define RKCANFD_REG_INT_RX_FINISH_INT BIT(0)
+
+#define RKCANFD_REG_INT_ALL \
+ (RKCANFD_REG_INT_WAKEUP_INT | \
+ RKCANFD_REG_INT_TXE_FIFO_FULL_INT | \
+ RKCANFD_REG_INT_TXE_FIFO_OV_INT | \
+ RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT | \
+ RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT | \
+ RKCANFD_REG_INT_BUS_OFF_INT | \
+ RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT | \
+ RKCANFD_REG_INT_RX_FIFO_FULL_INT | \
+ RKCANFD_REG_INT_ERROR_INT | \
+ RKCANFD_REG_INT_TX_ARBIT_FAIL_INT | \
+ RKCANFD_REG_INT_PASSIVE_ERROR_INT | \
+ RKCANFD_REG_INT_OVERLOAD_INT | \
+ RKCANFD_REG_INT_ERROR_WARNING_INT | \
+ RKCANFD_REG_INT_TX_FINISH_INT | \
+ RKCANFD_REG_INT_RX_FINISH_INT)
+
+#define RKCANFD_REG_INT_ALL_ERROR \
+ (RKCANFD_REG_INT_BUS_OFF_INT | \
+ RKCANFD_REG_INT_ERROR_INT | \
+ RKCANFD_REG_INT_PASSIVE_ERROR_INT | \
+ RKCANFD_REG_INT_ERROR_WARNING_INT)
+
+#define RKCANFD_REG_INT_MASK 0x010
+
+#define RKCANFD_REG_DMA_CTL 0x014
+#define RKCANFD_REG_DMA_CTL_DMA_RX_MODE BIT(1)
+#define RKCANFD_REG_DMA_CTL_DMA_TX_MODE BIT(0)
+
+#define RKCANFD_REG_BITTIMING 0x018
+#define RKCANFD_REG_BITTIMING_SAMPLE_MODE BIT(16)
+#define RKCANFD_REG_BITTIMING_SJW GENMASK(15, 14)
+#define RKCANFD_REG_BITTIMING_BRP GENMASK(13, 8)
+#define RKCANFD_REG_BITTIMING_TSEG2 GENMASK(6, 4)
+#define RKCANFD_REG_BITTIMING_TSEG1 GENMASK(3, 0)
+
+#define RKCANFD_REG_ARBITFAIL 0x028
+#define RKCANFD_REG_ARBITFAIL_ARBIT_FAIL_CODE GENMASK(6, 0)
+
+/* Register seems to be clear or read */
+#define RKCANFD_REG_ERROR_CODE 0x02c
+#define RKCANFD_REG_ERROR_CODE_PHASE BIT(29)
+#define RKCANFD_REG_ERROR_CODE_TYPE GENMASK(28, 26)
+#define RKCANFD_REG_ERROR_CODE_TYPE_BIT 0x0
+#define RKCANFD_REG_ERROR_CODE_TYPE_STUFF 0x1
+#define RKCANFD_REG_ERROR_CODE_TYPE_FORM 0x2
+#define RKCANFD_REG_ERROR_CODE_TYPE_ACK 0x3
+#define RKCANFD_REG_ERROR_CODE_TYPE_CRC 0x4
+#define RKCANFD_REG_ERROR_CODE_DIRECTION_RX BIT(25)
+#define RKCANFD_REG_ERROR_CODE_TX GENMASK(24, 16)
+#define RKCANFD_REG_ERROR_CODE_TX_OVERLOAD BIT(24)
+#define RKCANFD_REG_ERROR_CODE_TX_ERROR BIT(23)
+#define RKCANFD_REG_ERROR_CODE_TX_ACK BIT(22)
+#define RKCANFD_REG_ERROR_CODE_TX_ACK_EOF BIT(21)
+#define RKCANFD_REG_ERROR_CODE_TX_CRC BIT(20)
+#define RKCANFD_REG_ERROR_CODE_TX_STUFF_COUNT BIT(19)
+#define RKCANFD_REG_ERROR_CODE_TX_DATA BIT(18)
+#define RKCANFD_REG_ERROR_CODE_TX_SOF_DLC BIT(17)
+#define RKCANFD_REG_ERROR_CODE_TX_IDLE BIT(16)
+#define RKCANFD_REG_ERROR_CODE_RX GENMASK(15, 0)
+#define RKCANFD_REG_ERROR_CODE_RX_BUF_INT BIT(15)
+#define RKCANFD_REG_ERROR_CODE_RX_SPACE BIT(14)
+#define RKCANFD_REG_ERROR_CODE_RX_EOF BIT(13)
+#define RKCANFD_REG_ERROR_CODE_RX_ACK_LIM BIT(12)
+#define RKCANFD_REG_ERROR_CODE_RX_ACK BIT(11)
+#define RKCANFD_REG_ERROR_CODE_RX_CRC_LIM BIT(10)
+#define RKCANFD_REG_ERROR_CODE_RX_CRC BIT(9)
+#define RKCANFD_REG_ERROR_CODE_RX_STUFF_COUNT BIT(8)
+#define RKCANFD_REG_ERROR_CODE_RX_DATA BIT(7)
+#define RKCANFD_REG_ERROR_CODE_RX_DLC BIT(6)
+#define RKCANFD_REG_ERROR_CODE_RX_BRS_ESI BIT(5)
+#define RKCANFD_REG_ERROR_CODE_RX_RES BIT(4)
+#define RKCANFD_REG_ERROR_CODE_RX_FDF BIT(3)
+#define RKCANFD_REG_ERROR_CODE_RX_ID2_RTR BIT(2)
+#define RKCANFD_REG_ERROR_CODE_RX_SOF_IDE BIT(1)
+#define RKCANFD_REG_ERROR_CODE_RX_IDLE BIT(0)
+
+#define RKCANFD_REG_ERROR_CODE_NOACK \
+ (FIELD_PREP(RKCANFD_REG_ERROR_CODE_TYPE, \
+ RKCANFD_REG_ERROR_CODE_TYPE_ACK) | \
+ RKCANFD_REG_ERROR_CODE_TX_ACK_EOF | \
+ RKCANFD_REG_ERROR_CODE_RX_ACK)
+
+#define RKCANFD_REG_RXERRORCNT 0x034
+#define RKCANFD_REG_RXERRORCNT_RX_ERR_CNT GENMASK(7, 0)
+
+#define RKCANFD_REG_TXERRORCNT 0x038
+#define RKCANFD_REG_TXERRORCNT_TX_ERR_CNT GENMASK(8, 0)
+
+#define RKCANFD_REG_IDCODE 0x03c
+#define RKCANFD_REG_IDCODE_STANDARD_FRAME_ID GENMASK(10, 0)
+#define RKCANFD_REG_IDCODE_EXTENDED_FRAME_ID GENMASK(28, 0)
+
+#define RKCANFD_REG_IDMASK 0x040
+
+#define RKCANFD_REG_TXFRAMEINFO 0x050
+#define RKCANFD_REG_FRAMEINFO_FRAME_FORMAT BIT(7)
+#define RKCANFD_REG_FRAMEINFO_RTR BIT(6)
+#define RKCANFD_REG_FRAMEINFO_DATA_LENGTH GENMASK(3, 0)
+
+#define RKCANFD_REG_TXID 0x054
+#define RKCANFD_REG_TXID_TX_ID GENMASK(28, 0)
+
+#define RKCANFD_REG_TXDATA0 0x058
+#define RKCANFD_REG_TXDATA1 0x05c
+#define RKCANFD_REG_RXFRAMEINFO 0x060
+#define RKCANFD_REG_RXID 0x064
+#define RKCANFD_REG_RXDATA0 0x068
+#define RKCANFD_REG_RXDATA1 0x06c
+
+#define RKCANFD_REG_RTL_VERSION 0x070
+#define RKCANFD_REG_RTL_VERSION_MAJOR GENMASK(7, 4)
+#define RKCANFD_REG_RTL_VERSION_MINOR GENMASK(3, 0)
+
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING 0x100
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_SAMPLE_MODE BIT(31)
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_SJW GENMASK(30, 24)
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_BRP GENMASK(23, 16)
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG2 GENMASK(14, 8)
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG1 GENMASK(7, 0)
+
+#define RKCANFD_REG_FD_DATA_BITTIMING 0x104
+#define RKCANFD_REG_FD_DATA_BITTIMING_SAMPLE_MODE BIT(21)
+#define RKCANFD_REG_FD_DATA_BITTIMING_SJW GENMASK(20, 17)
+#define RKCANFD_REG_FD_DATA_BITTIMING_BRP GENMASK(16, 9)
+#define RKCANFD_REG_FD_DATA_BITTIMING_TSEG2 GENMASK(8, 5)
+#define RKCANFD_REG_FD_DATA_BITTIMING_TSEG1 GENMASK(4, 0)
+
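+/* Illustrative sketch, not part of the driver: programming the nominal
+ * bit timing might look roughly like this, with nbt pointing to a
+ * struct can_bittiming already validated by the CAN core. Whether the
+ * hardware expects the raw values or values minus one is an assumption
+ * here; the real driver may differ:
+ *
+ *	reg = FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_SJW, nbt->sjw) |
+ *	      FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_BRP, nbt->brp) |
+ *	      FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG2,
+ *			 nbt->phase_seg2) |
+ *	      FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG1,
+ *			 nbt->prop_seg + nbt->phase_seg1);
+ *	rkcanfd_write(priv, RKCANFD_REG_FD_NOMINAL_BITTIMING, reg);
+ */
+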
+#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION 0x108
+#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET GENMASK(6, 1)
+#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_ENABLE BIT(0)
+
+#define RKCANFD_REG_TIMESTAMP_CTRL 0x10c
+/* datasheet says 6:1, which is wrong */
+#define RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE GENMASK(5, 1)
+#define RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_ENABLE BIT(0)
+
+#define RKCANFD_REG_TIMESTAMP 0x110
+
+#define RKCANFD_REG_TXEVENT_FIFO_CTRL 0x114
+#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_CNT GENMASK(8, 5)
+#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_WATERMARK GENMASK(4, 1)
+#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_ENABLE BIT(0)
+
+#define RKCANFD_REG_RX_FIFO_CTRL 0x118
+#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_CNT GENMASK(6, 4)
+#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_FULL_WATERMARK GENMASK(3, 1)
+#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE BIT(0)
+
+#define RKCANFD_REG_AFC_CTRL 0x11c
+#define RKCANFD_REG_AFC_CTRL_UAF5 BIT(4)
+#define RKCANFD_REG_AFC_CTRL_UAF4 BIT(3)
+#define RKCANFD_REG_AFC_CTRL_UAF3 BIT(2)
+#define RKCANFD_REG_AFC_CTRL_UAF2 BIT(1)
+#define RKCANFD_REG_AFC_CTRL_UAF1 BIT(0)
+
+#define RKCANFD_REG_IDCODE0 0x120
+#define RKCANFD_REG_IDMASK0 0x124
+#define RKCANFD_REG_IDCODE1 0x128
+#define RKCANFD_REG_IDMASK1 0x12c
+#define RKCANFD_REG_IDCODE2 0x130
+#define RKCANFD_REG_IDMASK2 0x134
+#define RKCANFD_REG_IDCODE3 0x138
+#define RKCANFD_REG_IDMASK3 0x13c
+#define RKCANFD_REG_IDCODE4 0x140
+#define RKCANFD_REG_IDMASK4 0x144
+
+#define RKCANFD_REG_FD_TXFRAMEINFO 0x200
+#define RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT BIT(7)
+#define RKCANFD_REG_FD_FRAMEINFO_RTR BIT(6)
+#define RKCANFD_REG_FD_FRAMEINFO_FDF BIT(5)
+#define RKCANFD_REG_FD_FRAMEINFO_BRS BIT(4)
+#define RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH GENMASK(3, 0)
+
+#define RKCANFD_REG_FD_TXID 0x204
+#define RKCANFD_REG_FD_ID_EFF GENMASK(28, 0)
+#define RKCANFD_REG_FD_ID_SFF GENMASK(11, 0)
+
+#define RKCANFD_REG_FD_TXDATA0 0x208
+#define RKCANFD_REG_FD_TXDATA1 0x20c
+#define RKCANFD_REG_FD_TXDATA2 0x210
+#define RKCANFD_REG_FD_TXDATA3 0x214
+#define RKCANFD_REG_FD_TXDATA4 0x218
+#define RKCANFD_REG_FD_TXDATA5 0x21c
+#define RKCANFD_REG_FD_TXDATA6 0x220
+#define RKCANFD_REG_FD_TXDATA7 0x224
+#define RKCANFD_REG_FD_TXDATA8 0x228
+#define RKCANFD_REG_FD_TXDATA9 0x22c
+#define RKCANFD_REG_FD_TXDATA10 0x230
+#define RKCANFD_REG_FD_TXDATA11 0x234
+#define RKCANFD_REG_FD_TXDATA12 0x238
+#define RKCANFD_REG_FD_TXDATA13 0x23c
+#define RKCANFD_REG_FD_TXDATA14 0x240
+#define RKCANFD_REG_FD_TXDATA15 0x244
+
+#define RKCANFD_REG_FD_RXFRAMEINFO 0x300
+#define RKCANFD_REG_FD_RXID 0x304
+#define RKCANFD_REG_FD_RXTIMESTAMP 0x308
+#define RKCANFD_REG_FD_RXDATA0 0x30c
+#define RKCANFD_REG_FD_RXDATA1 0x310
+#define RKCANFD_REG_FD_RXDATA2 0x314
+#define RKCANFD_REG_FD_RXDATA3 0x318
+#define RKCANFD_REG_FD_RXDATA4 0x31c
+#define RKCANFD_REG_FD_RXDATA5 0x320
+#define RKCANFD_REG_FD_RXDATA6 0x324
+#define RKCANFD_REG_FD_RXDATA7 0x328
+#define RKCANFD_REG_FD_RXDATA8 0x32c
+#define RKCANFD_REG_FD_RXDATA9 0x330
+#define RKCANFD_REG_FD_RXDATA10 0x334
+#define RKCANFD_REG_FD_RXDATA11 0x338
+#define RKCANFD_REG_FD_RXDATA12 0x33c
+#define RKCANFD_REG_FD_RXDATA13 0x340
+#define RKCANFD_REG_FD_RXDATA14 0x344
+#define RKCANFD_REG_FD_RXDATA15 0x348
+
+#define RKCANFD_REG_RX_FIFO_RDATA 0x400
+#define RKCANFD_REG_TXE_FIFO_RDATA 0x500
+
+#define DEVICE_NAME "rockchip_canfd"
+#define RKCANFD_NAPI_WEIGHT 32
+#define RKCANFD_TXFIFO_DEPTH 2
+#define RKCANFD_TX_STOP_THRESHOLD 1
+#define RKCANFD_TX_START_THRESHOLD 1
+
+#define RKCANFD_TIMESTAMP_WORK_MAX_DELAY_SEC 60
+#define RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN (300 * MEGA)
+
+/* rk3568 CAN-FD Errata, as of Tue 07 Nov 2023 11:25:31 +08:00 */
+
+/* Erratum 1: The error frame sent by the CAN controller has an
+ * abnormal format.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_1 BIT(0)
+
+/* Erratum 2: The error frame sent after detecting a CRC error has an
+ * abnormal position.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_2 BIT(1)
+
+/* Erratum 3: Intermittent CRC calculation errors. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_3 BIT(2)
+
+/* Erratum 4: Intermittent occurrence of stuffing errors. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_4 BIT(3)
+
+/* Erratum 5: Counters related to the TXFIFO and RXFIFO exhibit
+ * abnormal counting behavior.
+ *
+ * The rk3568 CAN-FD errata sheet as of Tue 07 Nov 2023 11:25:31 +08:00
+ * states that only the rk3568v2 is affected by this erratum, but
+ * tests with the rk3568v2 and rk3568v3 show that the RX_FIFO_CNT is
+ * sometimes too high. This leads to CAN frames being read from a
+ * FIFO that is already empty.
+ *
+ * Further tests on the rk3568v2 and rk3568v3 show that in this
+ * situation (i.e. empty FIFO) all elements of the FIFO header
+ * (frameinfo, id, ts) contain the same data.
+ *
+ * On the rk3568v2 and rk3568v3, this problem only occurs extremely
+ * rarely with the standard clock of 300 MHz, but almost immediately
+ * at 80 MHz.
+ *
+ * To work around this problem, check for an empty FIFO with
+ * rkcanfd_fifo_header_empty() in rkcanfd_handle_rx_int_one() and exit
+ * early.
+ *
+ * To reproduce:
+ * assigned-clocks = <&cru CLK_CANx>;
+ * assigned-clock-rates = <80000000>;
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_5 BIT(4)
+
+/* Erratum 6: The CAN controller's transmission of extended frames may
+ * intermittently change into standard frames.
+ *
+ * Work around this issue by activating self reception (RXSTX). If we
+ * have pending TX CAN frames, check all RX'ed CAN frames in
+ * rkcanfd_rxstx_filter().
+ *
+ * If it's a frame we've sent and it's OK, call the TX complete
+ * handler: rkcanfd_handle_tx_done_one(). Mask the TX complete IRQ.
+ *
+ * If it's a frame we've sent, but the CAN-ID is mangled, resend the
+ * original extended frame.
+ *
+ * To reproduce:
+ * host:
+ * canfdtest -evx -g can0
+ * candump any,0:80000000 -cexdtA
+ * dut:
+ * canfdtest -evx can0
+ * ethtool -S can0
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_6 BIT(5)
+
+/* Erratum 7: In the passive error state, the CAN controller's
+ * interframe space segment counting is inaccurate.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_7 BIT(6)
+
+/* Erratum 8: The Format-Error error flag is transmitted one bit
+ * later.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_8 BIT(7)
+
+/* Erratum 9: In the arbitration segment, the CAN controller will
+ * identify stuffing errors as arbitration failures.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_9 BIT(8)
+
+/* Erratum 10: Does not support the BUSOFF slow recovery mechanism. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_10 BIT(9)
+
+/* Erratum 11: Arbitration error. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_11 BIT(10)
+
+/* Erratum 12: A dominant bit at the third bit of the intermission may
+ * cause a transmission error.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_12 BIT(11)
+
+/* Tests on the rk3568v2 and rk3568v3 show that receiving certain
+ * CAN-FD frames triggers an Error Interrupt.
+ *
+ * - Form Error in RX Arbitration Phase: TX_IDLE RX_STUFF_COUNT (0x0a010100) CMD=0 RX=0 TX=0
+ * Error-Warning=1 Bus-Off=0
+ * To reproduce:
+ * host:
+ * cansend can0 002##01f
+ * DUT:
+ * candump any,0:0,#FFFFFFFF -cexdHtA
+ *
+ * - Form Error in RX Arbitration Phase: TX_IDLE RX_CRC (0x0a010200) CMD=0 RX=0 TX=0
+ * Error-Warning=1 Bus-Off=0
+ * To reproduce:
+ * host:
+ * cansend can0 002##07217010000000000
+ * DUT:
+ * candump any,0:0,#FFFFFFFF -cexdHtA
+ */
+#define RKCANFD_QUIRK_CANFD_BROKEN BIT(12)
+
+/* known issues with rk3568v3:
+ *
+ * - Overload situation during high bus load
+ * To reproduce:
+ * host:
+ * # add a 2nd CAN adapter to the CAN bus
+ * cangen can0 -I 1 -Li -Di -p10 -g 0.3
+ * cansequence -rve
+ * DUT:
+ * cangen can0 -I2 -L1 -Di -p10 -c10 -g 1 -e
+ * cansequence -rv -i 1
+ *
+ * - TX starvation after repeated Bus-Off
+ * To reproduce:
+ * host:
+ * sleep 3 && cangen can0 -I2 -Li -Di -p10 -g 0.0
+ * DUT:
+ * cangen can0 -I2 -Li -Di -p10 -g 0.05
+ */
+
+enum rkcanfd_model {
+ RKCANFD_MODEL_RK3568V2 = 0x35682,
+ RKCANFD_MODEL_RK3568V3 = 0x35683,
+};
+
+struct rkcanfd_devtype_data {
+ enum rkcanfd_model model;
+ u32 quirks;
+};
+
+struct rkcanfd_fifo_header {
+ u32 frameinfo;
+ u32 id;
+ u32 ts;
+};
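+
+/* Plausible sketch only, based on the Erratum 5 description above: on
+ * an empty FIFO all header words carry the same data, so the
+ * rkcanfd_fifo_header_empty() check mentioned there might boil down
+ * to something like the following (the actual code may differ):
+ *
+ *	static inline bool
+ *	rkcanfd_fifo_header_empty(const struct rkcanfd_fifo_header *header)
+ *	{
+ *		return header->frameinfo == header->id &&
+ *			header->frameinfo == header->ts;
+ *	}
+ */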
+
+struct rkcanfd_stats {
+ struct u64_stats_sync syncp;
+
+ /* Erratum 5 */
+ u64_stats_t rx_fifo_empty_errors;
+
+ /* Erratum 6 */
+ u64_stats_t tx_extended_as_standard_errors;
+};
+
+struct rkcanfd_priv {
+ struct can_priv can;
+ struct can_rx_offload offload;
+ struct net_device *ndev;
+
+ void __iomem *regs;
+ unsigned int tx_head;
+ unsigned int tx_tail;
+
+ u32 reg_mode_default;
+ u32 reg_int_mask_default;
+ struct rkcanfd_devtype_data devtype_data;
+
+ struct cyclecounter cc;
+ struct timecounter tc;
+ struct delayed_work timestamp;
+ unsigned long work_delay_jiffies;
+
+ struct can_berr_counter bec;
+
+ struct rkcanfd_stats stats;
+
+ struct reset_control *reset;
+ struct clk_bulk_data *clks;
+ int clks_num;
+};
+
+static inline u32
+rkcanfd_read(const struct rkcanfd_priv *priv, u32 reg)
+{
+ return readl(priv->regs + reg);
+}
+
+static inline void
+rkcanfd_read_rep(const struct rkcanfd_priv *priv, u32 reg,
+ void *buf, unsigned int len)
+{
+ readsl(priv->regs + reg, buf, len / sizeof(u32));
+}
+
+static inline void
+rkcanfd_write(const struct rkcanfd_priv *priv, u32 reg, u32 val)
+{
+ writel(val, priv->regs + reg);
+}
+
+static inline u32
+rkcanfd_get_timestamp(const struct rkcanfd_priv *priv)
+{
+ return rkcanfd_read(priv, RKCANFD_REG_TIMESTAMP);
+}
+
+static inline unsigned int
+rkcanfd_get_tx_head(const struct rkcanfd_priv *priv)
+{
+ return READ_ONCE(priv->tx_head) & (RKCANFD_TXFIFO_DEPTH - 1);
+}
+
+static inline unsigned int
+rkcanfd_get_tx_tail(const struct rkcanfd_priv *priv)
+{
+ return READ_ONCE(priv->tx_tail) & (RKCANFD_TXFIFO_DEPTH - 1);
+}
+
+static inline unsigned int
+rkcanfd_get_tx_pending(const struct rkcanfd_priv *priv)
+{
+ return READ_ONCE(priv->tx_head) - READ_ONCE(priv->tx_tail);
+}
+
+static inline unsigned int
+rkcanfd_get_tx_free(const struct rkcanfd_priv *priv)
+{
+ return RKCANFD_TXFIFO_DEPTH - rkcanfd_get_tx_pending(priv);
+}
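+
+/* Illustrative sketch, not part of the driver: with a FIFO depth of
+ * RKCANFD_TXFIFO_DEPTH, thresholds like the ones defined above are
+ * typically used to flow-control the TX queue, e.g.:
+ *
+ *	if (rkcanfd_get_effective_tx_free(priv) <= RKCANFD_TX_STOP_THRESHOLD)
+ *		netif_stop_queue(ndev);
+ */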
+
+void rkcanfd_ethtool_init(struct rkcanfd_priv *priv);
+
+int rkcanfd_handle_rx_int(struct rkcanfd_priv *priv);
+
+void rkcanfd_skb_set_timestamp(const struct rkcanfd_priv *priv,
+ struct sk_buff *skb, const u32 timestamp);
+void rkcanfd_timestamp_init(struct rkcanfd_priv *priv);
+void rkcanfd_timestamp_start(struct rkcanfd_priv *priv);
+void rkcanfd_timestamp_stop(struct rkcanfd_priv *priv);
+void rkcanfd_timestamp_stop_sync(struct rkcanfd_priv *priv);
+
+unsigned int rkcanfd_get_effective_tx_free(const struct rkcanfd_priv *priv);
+void rkcanfd_xmit_retry(struct rkcanfd_priv *priv);
+netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+void rkcanfd_handle_tx_done_one(struct rkcanfd_priv *priv, const u32 ts,
+ unsigned int *frame_len_p);
+
+#endif
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index fca5a9a1d857..2d1f715459d7 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -245,7 +245,7 @@ static void sja1000_isa_remove(struct platform_device *pdev)
static struct platform_driver sja1000_isa_driver = {
.probe = sja1000_isa_probe,
- .remove_new = sja1000_isa_remove,
+ .remove = sja1000_isa_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 33f0e46ab1c2..c42ebe9da55a 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -329,7 +329,7 @@ static void sp_remove(struct platform_device *pdev)
static struct platform_driver sp_driver = {
.probe = sp_probe,
- .remove_new = sp_remove,
+ .remove = sp_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = sp_of_table,
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index bd25137062c5..278ee8722770 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -854,7 +854,7 @@ static struct platform_driver softing_driver = {
.name = KBUILD_MODNAME,
},
.probe = softing_pdev_probe,
- .remove_new = softing_pdev_remove,
+ .remove = softing_pdev_remove,
};
module_platform_driver(softing_driver);
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 3b8736ff0345..ec5c64006a16 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -752,7 +752,7 @@ static int mcp251x_hw_wake(struct spi_device *spi)
int ret;
/* Force wakeup interrupt to wake device, but don't execute IST */
- disable_irq(spi->irq);
+ disable_irq_nosync(spi->irq);
mcp251x_write_2regs(spi, CANINTE, CANINTE_WAKIE, CANINTF_WAKIF);
/* Wait for oscillator startup timer after wake up */
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 3e7526274e34..3bc56517fe7a 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -12,7 +12,7 @@
// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
//
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
index 9e8e82cdba46..61b0d6fa52dd 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
@@ -97,7 +97,16 @@ void can_ram_get_layout(struct can_ram_layout *layout,
if (ring) {
u8 num_rx_coalesce = 0, num_tx_coalesce = 0;
- num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, ring->rx_pending);
+ /* If the ring parameters have been configured in
+ * CAN-CC mode, but we are in CAN-FD mode now, they
+ * might be too big. Use the default CAN-FD values
+ * in this case.
+ */
+ num_rx = ring->rx_pending;
+ if (num_rx > layout->max_rx)
+ num_rx = layout->default_rx;
+
+ num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
/* The ethtool doc says:
* To disable coalescing, set usecs = 0 and max_frames = 1.
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
index 65150e762007..8c5be8d1c519 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -8,7 +8,7 @@
#include "mcp251xfd.h"
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
static const struct regmap_config mcp251xfd_regmap_crc;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
index 7bd2bcb5cf87..e684991fa391 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -12,7 +12,7 @@
// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
//
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "mcp251xfd.h"
#include "mcp251xfd-ram.h"
@@ -290,7 +290,7 @@ int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
const struct mcp251xfd_rx_ring *rx_ring;
u16 base = 0, ram_used;
u8 fifo_nr = 1;
- int i;
+ int err = 0, i;
netdev_reset_queue(priv->ndev);
@@ -386,10 +386,18 @@ int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
netdev_err(priv->ndev,
"Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n",
ram_used, MCP251XFD_RAM_SIZE);
- return -ENOMEM;
+ err = -ENOMEM;
}
- return 0;
+ if (priv->tx_obj_num_coalesce_irq &&
+ priv->tx_obj_num_coalesce_irq * 2 != priv->tx->obj_num) {
+ netdev_err(priv->ndev,
+ "Error during ring configuration, number of TEF coalescing buffers (%u) must be half of TEF buffers (%u).\n",
+ priv->tx_obj_num_coalesce_irq, priv->tx->obj_num);
+ err = -EINVAL;
+ }
+
+ return err;
}
void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
@@ -469,11 +477,25 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
/* switching from CAN-2.0 to CAN-FD mode or vice versa */
if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
+ const struct ethtool_ringparam ring = {
+ .rx_pending = priv->rx_obj_num,
+ .tx_pending = priv->tx->obj_num,
+ };
+ const struct ethtool_coalesce ec = {
+ .rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq,
+ .rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq,
+ .tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq,
+ .tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq,
+ };
struct can_ram_layout layout;
- can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
- priv->rx_obj_num = layout.default_rx;
- tx_ring->obj_num = layout.default_tx;
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, &ec, fd_mode);
+
+ priv->rx_obj_num = layout.cur_rx;
+ priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
+
+ tx_ring->obj_num = layout.cur_tx;
+ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
}
if (fd_mode) {
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
index b1de8052a45c..747ae3e8a768 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
@@ -12,7 +12,7 @@
// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
//
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/bitfield.h>
#include "mcp251xfd.h"
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index ab8d01784686..360158c295d3 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -914,7 +914,7 @@ static struct platform_driver sun4i_can_driver = {
.of_match_table = sun4ican_of_match,
},
.probe = sun4ican_probe,
- .remove_new = sun4ican_remove,
+ .remove = sun4ican_remove,
};
module_platform_driver(sun4i_can_driver);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 5aab440074c6..644e8b8eb91e 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -1025,7 +1025,7 @@ static struct platform_driver ti_hecc_driver = {
.of_match_table = ti_hecc_dt_ids,
},
.probe = ti_hecc_probe,
- .remove_new = ti_hecc_remove,
+ .remove = ti_hecc_remove,
.suspend = ti_hecc_suspend,
.resume = ti_hecc_resume,
};
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 3e1fba12c0c3..9dae0c71a2e1 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -17,11 +17,12 @@ config CAN_EMS_USB
config CAN_ESD_USB
tristate "esd electronics gmbh CAN/USB interfaces"
help
- This driver adds supports for several CAN/USB interfaces
+ This driver adds support for several CAN/USB interfaces
from esd electronics gmbh (https://www.esd.eu).
The driver supports the following devices:
- esd CAN-USB/2
+ - esd CAN-USB/3-FD
- esd CAN-USB/Micro
To compile this driver as a module, choose M here: the module
diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
index 41a0e4261d15..03ad10b01867 100644
--- a/drivers/net/can/usb/esd_usb.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -3,7 +3,7 @@
* CAN driver for esd electronics gmbh CAN-USB/2, CAN-USB/3 and CAN-USB/Micro
*
* Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs <socketcan@esd.eu>
- * Copyright (C) 2022-2023 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
+ * Copyright (C) 2022-2024 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
*/
#include <linux/can.h>
@@ -1116,9 +1116,6 @@ static int esd_usb_3_set_bittiming(struct net_device *netdev)
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
flags |= ESD_USB_3_BAUDRATE_FLAG_LOM;
- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- flags |= ESD_USB_3_BAUDRATE_FLAG_TRS;
-
baud_x->nom.brp = cpu_to_le16(nom_bt->brp & (nom_btc->brp_max - 1));
baud_x->nom.sjw = cpu_to_le16(nom_bt->sjw & (nom_btc->sjw_max - 1));
baud_x->nom.tseg1 = cpu_to_le16((nom_bt->prop_seg + nom_bt->phase_seg1)
@@ -1219,7 +1216,6 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
switch (le16_to_cpu(dev->udev->descriptor.idProduct)) {
case ESD_USB_CANUSB3_PRODUCT_ID:
priv->can.clock.freq = ESD_USB_3_CAN_CLOCK;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
priv->can.bittiming_const = &esd_usb_3_nom_bittiming_const;
priv->can.data_bittiming_const = &esd_usb_3_data_bittiming_const;
diff --git a/drivers/net/can/usb/etas_es58x/es581_4.c b/drivers/net/can/usb/etas_es58x/es581_4.c
index 4151b18fd045..1888ca1de7b6 100644
--- a/drivers/net/can/usb/etas_es58x/es581_4.c
+++ b/drivers/net/can/usb/etas_es58x/es581_4.c
@@ -9,7 +9,7 @@
* Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/kernel.h>
#include <linux/units.h>
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 5e3a72b7c469..71f24dc0a927 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -10,7 +10,7 @@
* Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/crc16.h>
#include <linux/ethtool.h>
#include <linux/kernel.h>
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index fa87b0b78e3e..84ffa1839bac 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -11,7 +11,7 @@
* Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/kernel.h>
#include <linux/units.h>
diff --git a/drivers/net/can/usb/f81604.c b/drivers/net/can/usb/f81604.c
index ec8cef7fd2d5..bc0c8903fe77 100644
--- a/drivers/net/can/usb/f81604.c
+++ b/drivers/net/can/usb/f81604.c
@@ -13,7 +13,7 @@
#include <linux/can/error.h>
#include <linux/can/platform/sja1000.h>
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned.h>
/* vendor and product id */
#define F81604_VENDOR_ID 0x2c42
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index ff10b3790d84..078496d9b7ba 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -22,6 +22,8 @@
*/
#include <linux/completion.h>
+#include <linux/ktime.h>
+#include <linux/math64.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/usb.h>
@@ -39,7 +41,6 @@
#define KVASER_USB_QUIRK_HAS_SILENT_MODE BIT(0)
#define KVASER_USB_QUIRK_HAS_TXRX_ERRORS BIT(1)
#define KVASER_USB_QUIRK_IGNORE_CLK_FREQ BIT(2)
-#define KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP BIT(3)
/* Device capabilities */
#define KVASER_USB_CAP_BERR_CAP 0x01
@@ -68,6 +69,7 @@ struct kvaser_usb_dev_card_data {
u32 ctrlmode_supported;
u32 capabilities;
struct kvaser_usb_dev_card_data_hydra hydra;
+ u32 usbcan_timestamp_msb;
};
/* Context for an outstanding, not yet ACKed, transmission */
@@ -216,4 +218,26 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev);
extern const struct can_bittiming_const kvaser_usb_flexc_bittiming_const;
+static inline ktime_t kvaser_usb_ticks_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
+ u64 ticks)
+{
+ return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq));
+}
+
+static inline ktime_t kvaser_usb_timestamp48_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
+ const __le16 *timestamp)
+{
+ u64 ticks = le16_to_cpu(timestamp[0]) |
+ (u64)(le16_to_cpu(timestamp[1])) << 16 |
+ (u64)(le16_to_cpu(timestamp[2])) << 32;
+
+ return kvaser_usb_ticks_to_ktime(cfg, ticks);
+}
+
+static inline ktime_t kvaser_usb_timestamp64_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
+ __le64 timestamp)
+{
+ return kvaser_usb_ticks_to_ktime(cfg, le64_to_cpu(timestamp));
+}
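+
+/* Worked example, illustrative only: with timestamp_freq = 24 (MHz),
+ * a raw value of 480000 ticks converts to 480000 * 1000 / 24 =
+ * 20000000 ns, i.e. 20 ms.
+ */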
+
#endif /* KVASER_USB_H */
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index daa34b532aa8..7d12776ab63e 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -94,7 +94,7 @@
#define USB_MINI_PCIE_1XCAN_PRODUCT_ID 0x011B
static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
- .quirks = KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP,
+ .quirks = 0,
.ops = &kvaser_usb_hydra_dev_ops,
};
@@ -756,23 +756,12 @@ freeurb:
static const struct net_device_ops kvaser_usb_netdev_ops = {
.ndo_open = kvaser_usb_open,
.ndo_stop = kvaser_usb_close,
- .ndo_start_xmit = kvaser_usb_start_xmit,
- .ndo_change_mtu = can_change_mtu,
-};
-
-static const struct net_device_ops kvaser_usb_netdev_ops_hwts = {
- .ndo_open = kvaser_usb_open,
- .ndo_stop = kvaser_usb_close,
.ndo_eth_ioctl = can_eth_ioctl_hwts,
.ndo_start_xmit = kvaser_usb_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops kvaser_usb_ethtool_ops = {
- .get_ts_info = ethtool_op_get_ts_info,
-};
-
-static const struct ethtool_ops kvaser_usb_ethtool_ops_hwts = {
.get_ts_info = can_ethtool_op_get_ts_info_hwts,
};
@@ -859,13 +848,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
netdev->flags |= IFF_ECHO;
netdev->netdev_ops = &kvaser_usb_netdev_ops;
- if (driver_info->quirks & KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP) {
- netdev->netdev_ops = &kvaser_usb_netdev_ops_hwts;
- netdev->ethtool_ops = &kvaser_usb_ethtool_ops_hwts;
- } else {
- netdev->netdev_ops = &kvaser_usb_netdev_ops;
- netdev->ethtool_ops = &kvaser_usb_ethtool_ops;
- }
+ netdev->ethtool_ops = &kvaser_usb_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->intf->dev);
netdev->dev_id = channel;
@@ -915,10 +898,8 @@ static int kvaser_usb_probe(struct usb_interface *intf,
ops = driver_info->ops;
err = ops->dev_setup_endpoints(dev);
- if (err) {
- dev_err(&intf->dev, "Cannot get usb endpoint(s)");
- return err;
- }
+ if (err)
+ return dev_err_probe(&intf->dev, err, "Cannot get usb endpoint(s)");
dev->udev = interface_to_usbdev(intf);
@@ -929,26 +910,20 @@ static int kvaser_usb_probe(struct usb_interface *intf,
dev->card_data.ctrlmode_supported = 0;
dev->card_data.capabilities = 0;
err = ops->dev_init_card(dev);
- if (err) {
- dev_err(&intf->dev,
- "Failed to initialize card, error %d\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(&intf->dev, err,
+ "Failed to initialize card\n");
err = ops->dev_get_software_info(dev);
- if (err) {
- dev_err(&intf->dev,
- "Cannot get software info, error %d\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(&intf->dev, err,
+ "Cannot get software info\n");
if (ops->dev_get_software_details) {
err = ops->dev_get_software_details(dev);
- if (err) {
- dev_err(&intf->dev,
- "Cannot get software details, error %d\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(&intf->dev, err,
+ "Cannot get software details\n");
}
if (WARN_ON(!dev->cfg))
@@ -962,18 +937,16 @@ static int kvaser_usb_probe(struct usb_interface *intf,
dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
err = ops->dev_get_card_info(dev);
- if (err) {
- dev_err(&intf->dev, "Cannot get card info, error %d\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(&intf->dev, err,
+ "Cannot get card info\n");
if (ops->dev_get_capabilities) {
err = ops->dev_get_capabilities(dev);
if (err) {
- dev_err(&intf->dev,
- "Cannot get capabilities, error %d\n", err);
kvaser_usb_remove_interfaces(dev);
- return err;
+ return dev_err_probe(&intf->dev, err,
+ "Cannot get capabilities\n");
}
}
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index c7ba768dfe17..3764b263add3 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -10,7 +10,6 @@
* - Transition from CAN_STATE_ERROR_WARNING to CAN_STATE_ERROR_ACTIVE is only
* reported after a call to do_get_berr_counter(), since firmware does not
* distinguish between ERROR_WARNING and ERROR_ACTIVE.
- * - Hardware timestamps are not set for CAN Tx frames.
*/
#include <linux/completion.h>
@@ -261,6 +260,15 @@ struct kvaser_cmd_tx_can {
u8 reserved[11];
} __packed;
+struct kvaser_cmd_tx_ack {
+ __le32 id;
+ u8 data[8];
+ u8 dlc;
+ u8 flags;
+ __le16 timestamp[3];
+ u8 reserved0[8];
+} __packed;
+
struct kvaser_cmd_header {
u8 cmd_no;
/* The destination HE address is stored in 0..5 of he_addr.
@@ -297,6 +305,7 @@ struct kvaser_cmd {
struct kvaser_cmd_rx_can rx_can;
struct kvaser_cmd_tx_can tx_can;
+ struct kvaser_cmd_tx_ack tx_ack;
} __packed;
} __packed;
@@ -522,23 +531,25 @@ kvaser_usb_hydra_net_priv_from_cmd(const struct kvaser_usb *dev,
return priv;
}
-static ktime_t
-kvaser_usb_hydra_ktime_from_rx_cmd(const struct kvaser_usb_dev_cfg *cfg,
- const struct kvaser_cmd *cmd)
+static ktime_t kvaser_usb_hydra_ktime_from_cmd(const struct kvaser_usb_dev_cfg *cfg,
+ const struct kvaser_cmd *cmd)
{
- u64 ticks;
+ ktime_t hwtstamp = 0;
if (cmd->header.cmd_no == CMD_EXTENDED) {
struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd;
- ticks = le64_to_cpu(cmd_ext->rx_can.timestamp);
- } else {
- ticks = le16_to_cpu(cmd->rx_can.timestamp[0]);
- ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[1])) << 16;
- ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[2])) << 32;
+ if (cmd_ext->cmd_no_ext == CMD_RX_MESSAGE_FD)
+ hwtstamp = kvaser_usb_timestamp64_to_ktime(cfg, cmd_ext->rx_can.timestamp);
+ else if (cmd_ext->cmd_no_ext == CMD_TX_ACKNOWLEDGE_FD)
+ hwtstamp = kvaser_usb_timestamp64_to_ktime(cfg, cmd_ext->tx_ack.timestamp);
+ } else if (cmd->header.cmd_no == CMD_RX_MESSAGE) {
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(cfg, cmd->rx_can.timestamp);
+ } else if (cmd->header.cmd_no == CMD_TX_ACKNOWLEDGE) {
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(cfg, cmd->tx_ack.timestamp);
}
- return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq));
+ return hwtstamp;
}
static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
@@ -1175,6 +1186,7 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
bool one_shot_fail = false;
bool is_err_frame = false;
u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);
+ struct sk_buff *skb;
priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
if (!priv)
@@ -1201,6 +1213,9 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags);
+ skb = priv->can.echo_skb[context->echo_index];
+ if (skb)
+ skb_hwtstamps(skb)->hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, cmd);
len = can_get_echo_skb(priv->netdev, context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
@@ -1234,7 +1249,7 @@ static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
stats = &priv->netdev->stats;
flags = cmd->rx_can.flags;
- hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, cmd);
+ hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, cmd);
if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
@@ -1302,7 +1317,7 @@ static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev,
KVASER_USB_KCAN_DATA_DLC_SHIFT;
flags = le32_to_cpu(cmd->rx_can.flags);
- hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, std_cmd);
+ hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, std_cmd);
if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 23bd7574b1c7..6b9122ab1464 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -119,6 +119,10 @@
/* Extended CAN identifier flag */
#define KVASER_EXTENDED_FRAME BIT(31)
+/* USBCanII timestamp */
+#define KVASER_USB_USBCAN_CLK_OVERFLOW_MASK GENMASK(31, 16)
+#define KVASER_USB_USBCAN_TIMESTAMP_FACTOR 10
+
struct kvaser_cmd_simple {
u8 tid;
u8 channel;
@@ -235,6 +239,20 @@ struct kvaser_cmd_tx_acknowledge_header {
u8 tid;
} __packed;
+struct leaf_cmd_tx_acknowledge {
+ u8 channel;
+ u8 tid;
+ __le16 time[3];
+ u8 padding[2];
+} __packed;
+
+struct usbcan_cmd_tx_acknowledge {
+ u8 channel;
+ u8 tid;
+ __le16 time;
+ u8 padding[2];
+} __packed;
+
struct leaf_cmd_can_error_event {
u8 tid;
u8 flags;
@@ -281,6 +299,12 @@ struct usbcan_cmd_error_event {
__le16 padding;
} __packed;
+struct usbcan_cmd_clk_overflow_event {
+ u8 tid;
+ u8 padding;
+ __le32 time;
+} __packed;
+
struct kvaser_cmd_ctrl_mode {
u8 tid;
u8 channel;
@@ -347,6 +371,7 @@ struct kvaser_cmd {
struct leaf_cmd_error_event error_event;
struct kvaser_cmd_cap_req cap_req;
struct kvaser_cmd_cap_res cap_res;
+ struct leaf_cmd_tx_acknowledge tx_ack;
} __packed leaf;
union {
@@ -355,6 +380,8 @@ struct kvaser_cmd {
struct usbcan_cmd_chip_state_event chip_state_event;
struct usbcan_cmd_can_error_event can_error_event;
struct usbcan_cmd_error_event error_event;
+ struct usbcan_cmd_tx_acknowledge tx_ack;
+ struct usbcan_cmd_clk_overflow_event clk_overflow_event;
} __packed usbcan;
struct kvaser_cmd_tx_can tx_can;
@@ -370,7 +397,7 @@ static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
[CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
[CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
[CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
- [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.leaf.tx_ack),
[CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo),
[CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
[CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
@@ -388,15 +415,14 @@ static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
[CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
[CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
[CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
- [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.usbcan.tx_ack),
[CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo),
[CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
[CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
[CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event),
[CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event),
[CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
- /* ignored events: */
- [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY,
+ [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = kvaser_fsize(u.usbcan.clk_overflow_event),
};
/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
@@ -463,11 +489,27 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = {
.bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_16mhz = {
.clock = {
.freq = 16 * MEGA /* Hz */,
},
- .timestamp_freq = 1,
+ .timestamp_freq = 16,
+ .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_24mhz = {
+ .clock = {
+ .freq = 16 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 24,
+ .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_32mhz = {
+ .clock = {
+ .freq = 16 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 32,
.bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
};
@@ -475,7 +517,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = {
.clock = {
.freq = 16 * MEGA /* Hz */,
},
- .timestamp_freq = 1,
+ .timestamp_freq = 16,
.bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
@@ -483,7 +525,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = {
.clock = {
.freq = 24 * MEGA /* Hz */,
},
- .timestamp_freq = 1,
+ .timestamp_freq = 24,
.bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
@@ -491,10 +533,19 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = {
.clock = {
.freq = 32 * MEGA /* Hz */,
},
- .timestamp_freq = 1,
+ .timestamp_freq = 32,
.bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
+static inline ktime_t kvaser_usb_usbcan_timestamp_to_ktime(const struct kvaser_usb *dev,
+ __le16 timestamp)
+{
+ u64 ticks = le16_to_cpu(timestamp) |
+ dev->card_data.usbcan_timestamp_msb;
+
+ return kvaser_usb_ticks_to_ktime(dev->cfg, ticks * KVASER_USB_USBCAN_TIMESTAMP_FACTOR);
+}
+
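+/* Worked example, illustrative only: if the last CLOCK_OVERFLOW event
+ * stored usbcan_timestamp_msb = 0x00010000 and a frame reports
+ * time = 0x0234, the combined tick count is 0x00010234, which is then
+ * scaled by KVASER_USB_USBCAN_TIMESTAMP_FACTOR (10) before the ns
+ * conversion in kvaser_usb_ticks_to_ktime().
+ */
+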
static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
@@ -678,8 +729,19 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) {
/* Firmware expects bittiming parameters calculated for 16MHz
* clock, regardless of the actual clock
+ * Though, the reported freq is used for timestamps
*/
- dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg;
+ switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
+ case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_16mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_24mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_32mhz;
+ break;
+ }
} else {
switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
@@ -880,6 +942,8 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
struct kvaser_usb_net_priv *priv;
unsigned long flags;
u8 channel, tid;
+ struct sk_buff *skb;
+ ktime_t hwtstamp = 0;
channel = cmd->u.tx_acknowledge_header.channel;
tid = cmd->u.tx_acknowledge_header.tid;
@@ -901,14 +965,14 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
/* Sometimes the state change doesn't come after a bus-off event */
if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) {
- struct sk_buff *skb;
+ struct sk_buff *err_skb;
struct can_frame *cf;
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (skb) {
+ err_skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (err_skb) {
cf->can_id |= CAN_ERR_RESTARTED;
- netif_rx(skb);
+ netif_rx(err_skb);
} else {
netdev_err(priv->netdev,
"No memory left for err_skb\n");
@@ -919,9 +983,20 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(dev->cfg, cmd->u.leaf.tx_ack.time);
+ break;
+ case KVASER_USBCAN:
+ hwtstamp = kvaser_usb_usbcan_timestamp_to_ktime(dev, cmd->u.usbcan.tx_ack.time);
+ break;
+ }
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+ skb = priv->can.echo_skb[context->echo_index];
+ if (skb)
+ skb_hwtstamps(skb)->hwtstamp = hwtstamp;
stats->tx_packets++;
stats->tx_bytes += can_get_echo_skb(priv->netdev,
context->echo_index, NULL);
@@ -1299,6 +1374,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
struct net_device_stats *stats;
u8 channel = cmd->u.rx_can_header.channel;
const u8 *rx_data = NULL; /* GCC */
+ ktime_t hwtstamp = 0;
if (channel >= dev->nchannels) {
dev_err(&dev->intf->dev,
@@ -1329,9 +1405,11 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
switch (dev->driver_info->family) {
case KVASER_LEAF:
rx_data = cmd->u.leaf.rx_can.data;
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(dev->cfg, cmd->u.leaf.rx_can.time);
break;
case KVASER_USBCAN:
rx_data = cmd->u.usbcan.rx_can.data;
+ hwtstamp = kvaser_usb_usbcan_timestamp_to_ktime(dev, cmd->u.usbcan.rx_can.time);
break;
}
@@ -1375,6 +1453,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
memcpy(cf->data, &rx_data[6], cf->len);
}
+ skb_hwtstamps(skb)->hwtstamp = hwtstamp;
stats->rx_packets++;
if (!(cf->can_id & CAN_RTR_FLAG))
stats->rx_bytes += cf->len;
@@ -1508,7 +1587,7 @@ static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev,
complete(&priv->get_busparams_comp);
}
-static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
+static void kvaser_usb_leaf_handle_command(struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
if (kvaser_usb_leaf_verify_size(dev, cmd) < 0)
@@ -1554,12 +1633,15 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
kvaser_usb_leaf_get_busparams_reply(dev, cmd);
break;
- /* Ignored commands */
case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
if (dev->driver_info->family != KVASER_USBCAN)
goto warn;
+ dev->card_data.usbcan_timestamp_msb =
+ le32_to_cpu(cmd->u.usbcan.clk_overflow_event.time) &
+ KVASER_USB_USBCAN_CLK_OVERFLOW_MASK;
break;
+ /* Ignored commands */
case CMD_FLUSH_QUEUE_REPLY:
if (dev->driver_info->family != KVASER_LEAF)
goto warn;
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 47619e9cb005..41c0a1c399bf 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -6,7 +6,7 @@
* This driver is inspired by the 4.6.2 version of net/can/usb/usb_8dev.c
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index b211b6e283a2..c75df1755b3b 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -8,7 +8,7 @@
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/ethtool.h>
#include <linux/module.h>
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 3d68fef46ded..59f7cd8ceb39 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -901,11 +901,8 @@ int pcan_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF);
info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index d944911d7f05..436c0e4b0344 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -2103,7 +2103,7 @@ static void xcan_remove(struct platform_device *pdev)
static struct platform_driver xcan_driver = {
.probe = xcan_probe,
- .remove_new = xcan_remove,
+ .remove = xcan_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &xcan_dev_pm_ops,
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 0783fc121bbb..c39cb119e760 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -27,6 +27,7 @@
#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <net/dsa.h>
#include "b53_regs.h"
@@ -224,6 +225,9 @@ static const struct b53_mib_desc b53_mibs_58xx[] = {
#define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx)
+#define B53_MAX_MTU_25 (1536 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)
+#define B53_MAX_MTU (9720 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)
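+/* With the usual ETH_HLEN = 14, VLAN_HLEN = 4 and ETH_FCS_LEN = 4,
+ * these work out to 1514 bytes on BCM5325/5365 and 9698 bytes on
+ * later chips.
+ */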
+
static int b53_do_vlan_op(struct b53_device *dev, u8 op)
{
unsigned int i;
@@ -2254,20 +2258,25 @@ static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
bool allow_10_100;
if (is5325(dev) || is5365(dev))
- return -EOPNOTSUPP;
+ return 0;
if (!dsa_is_cpu_port(ds, port))
return 0;
- enable_jumbo = (mtu >= JMS_MIN_SIZE);
- allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID);
+ enable_jumbo = (mtu > ETH_DATA_LEN);
+ allow_10_100 = !is63xx(dev);
return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
}
static int b53_get_max_mtu(struct dsa_switch *ds, int port)
{
- return JMS_MAX_SIZE;
+ struct b53_device *dev = ds->priv;
+
+ if (is5325(dev) || is5365(dev))
+ return B53_MAX_MTU_25;
+
+ return B53_MAX_MTU;
}
static const struct phylink_mac_ops b53_phylink_mac_ops = {
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index 897e5e8b3d69..31d070bf161a 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -343,10 +343,9 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
dev_set_drvdata(&mdiodev->dev, dev);
ret = b53_switch_register(dev);
- if (ret) {
- dev_err(&mdiodev->dev, "failed to register switch: %i\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&mdiodev->dev, ret,
+ "failed to register switch\n");
return ret;
}
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
index 308f15d3832e..467da057579e 100644
--- a/drivers/net/dsa/b53/b53_spi.c
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -16,7 +16,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/delay.h>
#include <linux/kernel.h>
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index 268949939636..d246f95d57ec 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
+#include <linux/iopoll.h>
#include <linux/mutex.h>
#include <linux/mii.h>
#include <linux/of.h>
@@ -839,6 +840,8 @@ static void lan9303_handle_reset(struct lan9303 *chip)
if (!chip->reset_gpio)
return;
+ gpiod_set_value_cansleep(chip->reset_gpio, 1);
+
if (chip->reset_duration != 0)
msleep(chip->reset_duration);
@@ -864,8 +867,34 @@ static int lan9303_disable_processing(struct lan9303 *chip)
static int lan9303_check_device(struct lan9303 *chip)
{
int ret;
+ int err;
u32 reg;
+ /* In I2C-managed configurations this polling loop will clash with
+ * the switch's reading of the EEPROM right after reset, and this
+ * behaviour is not configurable. While lan9303_read() already has a
+ * quite long retry timeout, it seems not all cases are detected as
+ * arbitration errors.
+ *
+ * According to the datasheet, the EEPROM loader has a 30ms timeout
+ * (in case of a missing EEPROM).
+ *
+ * Loading of the largest supported EEPROM is expected to take at least
+ * 5.9s.
+ */
+ err = read_poll_timeout(lan9303_read, ret,
+ !ret && reg & LAN9303_HW_CFG_READY,
+ 20000, 6000000, false,
+ chip->regmap, LAN9303_HW_CFG, &reg);
+ if (ret) {
+ dev_err(chip->dev, "failed to read HW_CFG reg: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ if (err) {
+ dev_err(chip->dev, "HW_CFG not ready: 0x%08x\n", reg);
+ return err;
+ }
+
ret = lan9303_read(chip->regmap, LAN9303_CHIP_REV, &reg);
if (ret) {
dev_err(chip->dev, "failed to read chip revision register: %d\n",
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index c1b906c05a02..12a86585a77f 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -1,14 +1,17 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig NET_DSA_MICROCHIP_KSZ_COMMON
- tristate "Microchip KSZ8795/KSZ9477/LAN937x series switch support"
+ tristate "Microchip KSZ8XXX/KSZ9XXX/LAN937X series switch support"
depends on NET_DSA
select NET_DSA_TAG_KSZ
select NET_DSA_TAG_NONE
select NET_IEEE8021Q_HELPERS
select DCB
help
- This driver adds support for Microchip KSZ9477 series switch and
- KSZ8795/KSZ88x3 switch chips.
+ This driver adds support for Microchip KSZ8, KSZ9 and
+ LAN937X series switch chips, namely KSZ8863/8873,
+ KSZ8895/8864, KSZ8794/8795/8765,
+ KSZ9477/9897/9896/9567/8567, KSZ9893/9563/8563 and
+ LAN9370/9371/9372/9373/9374.
config NET_DSA_MICROCHIP_KSZ9477_I2C
tristate "KSZ series I2C connected switch driver"
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index 1cfba1ec9355..9347cfb3d0b5 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_switch.o
ksz_switch-objs := ksz_common.o ksz_dcb.o
ksz_switch-objs += ksz9477.o ksz9477_acl.o ksz9477_tc_flower.o
-ksz_switch-objs += ksz8795.o
+ksz_switch-objs += ksz8.o
ksz_switch-objs += lan937x_main.o
ifdef CONFIG_NET_DSA_MICROCHIP_KSZ_PTP
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8.c
index d27b9c36d73f..da7110d67558 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8.c
@@ -1,6 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Microchip KSZ8795 switch driver
+ * Microchip KSZ8XXX series switch driver
+ *
+ * It supports the following switches:
+ * - KSZ8863, KSZ8873 aka KSZ88X3
+ * - KSZ8895, KSZ8864 aka KSZ8895 family
+ * - KSZ8794, KSZ8795, KSZ8765 aka KSZ87XX
+ * Note that it does NOT support:
+ * - KSZ8563, KSZ8567 - see KSZ9477 driver
*
* Copyright (C) 2017 Microchip Technology Inc.
* Tristram Ha <Tristram.Ha@microchip.com>
@@ -23,7 +30,7 @@
#include <linux/phylink.h>
#include "ksz_common.h"
-#include "ksz8795_reg.h"
+#include "ksz8_reg.h"
#include "ksz8.h"
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
@@ -38,6 +45,20 @@ static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
bits, set ? bits : 0);
}
+/**
+ * ksz8_ind_write8 - EEE/ACL/PME indirect register write
+ * @dev: The device structure.
+ * @table: Function & table select, register 110.
+ * @addr: Indirect access control, register 111.
+ * @data: The data to be written.
+ *
+ * This function performs an indirect register write for EEE, ACL or
+ * PME switch functionalities. Both 8-bit registers 110 and 111 are
+ * written at once with ksz_write16, using the serial multiple write
+ * functionality.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
{
const u16 *regs;
@@ -58,6 +79,59 @@ static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
return ret;
}
+/**
+ * ksz8_ind_read8 - EEE/ACL/PME indirect register read
+ * @dev: The device structure.
+ * @table: Function & table select, register 110.
+ * @addr: Indirect access control, register 111.
+ * @val: The value read.
+ *
+ * This function performs an indirect register read for EEE, ACL or
+ * PME switch functionalities. Both 8-bit registers 110 and 111 are
+ * written at once with ksz_write16, using the serial multiple write
+ * functionality.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+static int ksz8_ind_read8(struct ksz_device *dev, u8 table, u16 addr, u8 *val)
+{
+ const u16 *regs;
+ u16 ctrl_addr;
+ int ret = 0;
+
+ regs = dev->info->regs;
+
+ mutex_lock(&dev->alu_mutex);
+
+ ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ if (!ret)
+ ret = ksz_read8(dev, regs[REG_IND_BYTE], val);
+
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+int ksz8_pme_write8(struct ksz_device *dev, u32 reg, u8 value)
+{
+ return ksz8_ind_write8(dev, (u8)(reg >> 8), (u8)(reg), value);
+}
+
+int ksz8_pme_pread8(struct ksz_device *dev, int port, int offset, u8 *data)
+{
+ u8 table = (u8)(offset >> 8 | (port + 1));
+
+ return ksz8_ind_read8(dev, table, (u8)(offset), data);
+}
+
+int ksz8_pme_pwrite8(struct ksz_device *dev, int port, int offset, u8 data)
+{
+ u8 table = (u8)(offset >> 8 | (port + 1));
+
+ return ksz8_ind_write8(dev, table, (u8)(offset), data);
+}
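+
+/* Usage sketch, illustrative only: the PME helpers wrap the indirect
+ * access above; clearing a port's PME control register, with regs
+ * taken from dev->info->regs, looks like:
+ *
+ *	ksz8_pme_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], 0);
+ */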
+
int ksz8_reset_switch(struct ksz_device *dev)
{
if (ksz_is_ksz88x3(dev)) {
@@ -120,7 +194,9 @@ int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu)
case KSZ8794_CHIP_ID:
case KSZ8765_CHIP_ID:
return ksz8795_change_mtu(dev, frame_size);
- case KSZ8830_CHIP_ID:
+ case KSZ88X3_CHIP_ID:
+ case KSZ8864_CHIP_ID:
+ case KSZ8895_CHIP_ID:
return ksz8863_change_mtu(dev, frame_size);
}
@@ -317,7 +393,7 @@ static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
- if (ksz_is_ksz88x3(dev))
+ if (is_ksz88xx(dev))
ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt);
else
ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt);
@@ -325,7 +401,7 @@ void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
- if (ksz_is_ksz88x3(dev))
+ if (is_ksz88xx(dev))
return;
/* enable the port for flush/freeze function */
@@ -343,7 +419,8 @@ void ksz8_port_init_cnt(struct ksz_device *dev, int port)
struct ksz_port_mib *mib = &dev->ports[port].mib;
u64 *dropped;
- if (!ksz_is_ksz88x3(dev)) {
+ /* For KSZ8795 family. */
+ if (ksz_is_ksz87xx(dev)) {
/* flush all enabled port MIB counters */
ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true);
@@ -542,11 +619,11 @@ static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
shifts[STATIC_MAC_FWD_PORTS];
alu->is_override = (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
- /* KSZ8795 family switches have STATIC_MAC_TABLE_USE_FID and
+ /* KSZ8795/KSZ8895 family switches have STATIC_MAC_TABLE_USE_FID and
* STATIC_MAC_TABLE_FID definitions off by 1 when doing read on the
* static MAC table compared to doing write.
*/
- if (ksz_is_ksz87xx(dev))
+ if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev))
data_hi >>= 1;
alu->is_static = true;
alu->is_use_fid = (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
@@ -1545,6 +1622,7 @@ static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
+ const u16 *regs = dev->info->regs;
struct dsa_switch *ds = dev->ds;
const u32 *masks;
int queues;
@@ -1575,6 +1653,13 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
member = BIT(dsa_upstream_port(ds, port));
ksz8_cfg_port_member(dev, port, member);
+
+ /* Disable all WoL options by default. Otherwise
+ * ksz_switch_macaddr_get/put logic will not work properly.
+ * CPU port 4 has no WoL functionality.
+ */
+ if (ksz_is_ksz87xx(dev) && !cpu_port)
+ ksz8_pme_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], 0);
}
static void ksz88x3_config_rmii_clk(struct ksz_device *dev)
@@ -1617,7 +1702,8 @@ void ksz8_config_cpu_port(struct dsa_switch *ds)
for (i = 0; i < dev->phy_port_cnt; i++) {
p = &dev->ports[i];
- if (!ksz_is_ksz88x3(dev)) {
+ /* For KSZ8795 family. */
+ if (ksz_is_ksz87xx(dev)) {
ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote);
if (remote & KSZ8_PORT_FIBER_MODE)
p->fiber = 1;
@@ -1790,7 +1876,8 @@ int ksz8_enable_stp_addr(struct ksz_device *dev)
int ksz8_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
- int i;
+ const u16 *regs = dev->info->regs;
+ int i, ret = 0;
ds->mtu_enforcement_ingress = true;
@@ -1829,7 +1916,21 @@ int ksz8_setup(struct dsa_switch *ds)
for (i = 0; i < (dev->info->num_vlans / 4); i++)
ksz8_r_vlan_entries(dev, i);
- return ksz8_handle_global_errata(ds);
+ /* Make sure PME (WoL) is not enabled. If requested, it will
+ * be enabled by ksz_wol_pre_shutdown(). Otherwise, some PMICs
+ * do not like PME event changes before shutdown. PME is only
+ * available on the KSZ87xx family.
+ */
+ if (ksz_is_ksz87xx(dev)) {
+ ret = ksz8_pme_write8(dev, regs[REG_SW_PME_CTRL], 0);
+ if (!ret)
+ ret = ksz_rmw8(dev, REG_INT_ENABLE, INT_PME, 0);
+ }
+
+ if (ret)
+ return ret;
+
+ return ksz8_handle_global_errata(ds);
}
void ksz8_get_caps(struct ksz_device *dev, int port,
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index ae43077e76c3..e1c79ff97123 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -54,6 +54,9 @@ int ksz8_reset_switch(struct ksz_device *dev);
int ksz8_switch_init(struct ksz_device *dev);
void ksz8_switch_exit(struct ksz_device *dev);
int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu);
+int ksz8_pme_write8(struct ksz_device *dev, u32 reg, u8 value);
+int ksz8_pme_pread8(struct ksz_device *dev, int port, int offset, u8 *data);
+int ksz8_pme_pwrite8(struct ksz_device *dev, int port, int offset, u8 data);
void ksz8_phylink_mac_link_up(struct phylink_config *config,
struct phy_device *phydev, unsigned int mode,
phy_interface_t interface, int speed, int duplex,
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
index 5711a59e2ac9..a8bfcd917bf7 100644
--- a/drivers/net/dsa/microchip/ksz8863_smi.c
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -199,11 +199,11 @@ static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
static const struct of_device_id ksz8863_dt_ids[] = {
{
.compatible = "microchip,ksz8863",
- .data = &ksz_switch_chips[KSZ8830]
+ .data = &ksz_switch_chips[KSZ88X3]
},
{
.compatible = "microchip,ksz8873",
- .data = &ksz_switch_chips[KSZ8830]
+ .data = &ksz_switch_chips[KSZ88X3]
},
{ },
};
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8_reg.h
index 69566a5d9cda..329688603a58 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8_reg.h
@@ -1,13 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Microchip KSZ8795 register definitions
+ * Microchip KSZ8XXX series register definitions
+ *
+ * These definitions are based on the KSZ8795; unless a prefix
+ * indicates otherwise, they apply to all KSZ8-series devices.
+ * Registers and masks that differ between chips are defined in
+ * dedicated structures in ksz_common.c.
*
* Copyright (c) 2017 Microchip Technology Inc.
* Tristram Ha <Tristram.Ha@microchip.com>
*/
-#ifndef __KSZ8795_REG_H
-#define __KSZ8795_REG_H
+#ifndef __KSZ8_REG_H
+#define __KSZ8_REG_H
#define KS_PORT_M 0x1F
@@ -359,8 +364,6 @@
#define REG_IND_DATA_1 0x77
#define REG_IND_DATA_0 0x78
-#define REG_IND_DATA_PME_EEE_ACL 0xA0
-
#define REG_INT_STATUS 0x7C
#define REG_INT_ENABLE 0x7D
@@ -704,8 +707,6 @@
#define KSZ8795_ID_LO 0x1550
#define KSZ8863_ID_LO 0x1430
-#define KSZ8795_SW_ID 0x8795
-
#define PHY_REG_LINK_MD 0x1D
#define PHY_START_CABLE_DIAG BIT(15)
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 425e20daf1e9..0ba658a72d8f 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -56,187 +56,6 @@ int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
REG_SW_MTU_MASK, frame_size);
}
-/**
- * ksz9477_handle_wake_reason - Handle wake reason on a specified port.
- * @dev: The device structure.
- * @port: The port number.
- *
- * This function reads the PME (Power Management Event) status register of a
- * specified port to determine the wake reason. If there is no wake event, it
- * returns early. Otherwise, it logs the wake reason which could be due to a
- * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status register
- * is then cleared to acknowledge the handling of the wake event.
- *
- * Return: 0 on success, or an error code on failure.
- */
-static int ksz9477_handle_wake_reason(struct ksz_device *dev, int port)
-{
- u8 pme_status;
- int ret;
-
- ret = ksz_pread8(dev, port, REG_PORT_PME_STATUS, &pme_status);
- if (ret)
- return ret;
-
- if (!pme_status)
- return 0;
-
- dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port,
- pme_status & PME_WOL_MAGICPKT ? " \"Magic Packet\"" : "",
- pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "",
- pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : "");
-
- return ksz_pwrite8(dev, port, REG_PORT_PME_STATUS, pme_status);
-}
-
-/**
- * ksz9477_get_wol - Get Wake-on-LAN settings for a specified port.
- * @dev: The device structure.
- * @port: The port number.
- * @wol: Pointer to ethtool Wake-on-LAN settings structure.
- *
- * This function checks the PME Pin Control Register to see if PME Pin Output
- * Enable is set, indicating PME is enabled. If enabled, it sets the supported
- * and active WoL flags.
- */
-void ksz9477_get_wol(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol)
-{
- u8 pme_ctrl;
- int ret;
-
- if (!dev->wakeup_source)
- return;
-
- wol->supported = WAKE_PHY;
-
- /* Check if the current MAC address on this port can be set
- * as global for WAKE_MAGIC support. The result may vary
- * dynamically based on other ports configurations.
- */
- if (ksz_is_port_mac_global_usable(dev->ds, port))
- wol->supported |= WAKE_MAGIC;
-
- ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl);
- if (ret)
- return;
-
- if (pme_ctrl & PME_WOL_MAGICPKT)
- wol->wolopts |= WAKE_MAGIC;
- if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY))
- wol->wolopts |= WAKE_PHY;
-}
-
-/**
- * ksz9477_set_wol - Set Wake-on-LAN settings for a specified port.
- * @dev: The device structure.
- * @port: The port number.
- * @wol: Pointer to ethtool Wake-on-LAN settings structure.
- *
- * This function configures Wake-on-LAN (WoL) settings for a specified port.
- * It validates the provided WoL options, checks if PME is enabled via the
- * switch's PME Pin Control Register, clears any previous wake reasons,
- * and sets the Magic Packet flag in the port's PME control register if
- * specified.
- *
- * Return: 0 on success, or other error codes on failure.
- */
-int ksz9477_set_wol(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol)
-{
- u8 pme_ctrl = 0, pme_ctrl_old = 0;
- bool magic_switched_off;
- bool magic_switched_on;
- int ret;
-
- if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
- return -EINVAL;
-
- if (!dev->wakeup_source)
- return -EOPNOTSUPP;
-
- ret = ksz9477_handle_wake_reason(dev, port);
- if (ret)
- return ret;
-
- if (wol->wolopts & WAKE_MAGIC)
- pme_ctrl |= PME_WOL_MAGICPKT;
- if (wol->wolopts & WAKE_PHY)
- pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY;
-
- ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl_old);
- if (ret)
- return ret;
-
- if (pme_ctrl_old == pme_ctrl)
- return 0;
-
- magic_switched_off = (pme_ctrl_old & PME_WOL_MAGICPKT) &&
- !(pme_ctrl & PME_WOL_MAGICPKT);
- magic_switched_on = !(pme_ctrl_old & PME_WOL_MAGICPKT) &&
- (pme_ctrl & PME_WOL_MAGICPKT);
-
- /* To keep reference count of MAC address, we should do this
- * operation only on change of WOL settings.
- */
- if (magic_switched_on) {
- ret = ksz_switch_macaddr_get(dev->ds, port, NULL);
- if (ret)
- return ret;
- } else if (magic_switched_off) {
- ksz_switch_macaddr_put(dev->ds);
- }
-
- ret = ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, pme_ctrl);
- if (ret) {
- if (magic_switched_on)
- ksz_switch_macaddr_put(dev->ds);
- return ret;
- }
-
- return 0;
-}
-
-/**
- * ksz9477_wol_pre_shutdown - Prepares the switch device for shutdown while
- * considering Wake-on-LAN (WoL) settings.
- * @dev: The switch device structure.
- * @wol_enabled: Pointer to a boolean which will be set to true if WoL is
- * enabled on any port.
- *
- * This function prepares the switch device for a safe shutdown while taking
- * into account the Wake-on-LAN (WoL) settings on the user ports. It updates
- * the wol_enabled flag accordingly to reflect whether WoL is active on any
- * port.
- */
-void ksz9477_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled)
-{
- struct dsa_port *dp;
- int ret;
-
- *wol_enabled = false;
-
- if (!dev->wakeup_source)
- return;
-
- dsa_switch_for_each_user_port(dp, dev->ds) {
- u8 pme_ctrl = 0;
-
- ret = ksz_pread8(dev, dp->index, REG_PORT_PME_CTRL, &pme_ctrl);
- if (!ret && pme_ctrl)
- *wol_enabled = true;
-
- /* make sure there are no pending wake events which would
- * prevent the device from going to sleep/shutdown.
- */
- ksz9477_handle_wake_reason(dev, dp->index);
- }
-
- /* Now we are save to enable PME pin. */
- if (*wol_enabled)
- ksz_write8(dev, REG_SW_PME_CTRL, PME_ENABLE);
-}
-
static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
unsigned int val;
@@ -427,54 +246,70 @@ void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
mutex_unlock(&p->mib.cnt_mutex);
}
-int ksz9477_errata_monitor(struct ksz_device *dev, int port,
- u64 tx_late_col)
+static int ksz9477_half_duplex_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col)
{
+ u8 lue_ctrl;
u32 pmavbc;
- u8 status;
u16 pqm;
int ret;
- ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status);
+ /* Errata DS80000754 recommends monitoring potential faults in
+ * half-duplex mode. The switch might not be able to communicate anymore
+ * in these states. If you see this message, please read the
+ * errata sheet for more information:
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf
+ * To work around this issue, half-duplex mode should be avoided.
+ * A software reset could be implemented to recover from this state.
+ */
+ dev_warn_once(dev->dev,
+ "Half-duplex detected on port %d, transmission halt may occur\n",
+ port);
+ if (tx_late_col != 0) {
+ /* Transmission halt with late collisions */
+ dev_crit_once(dev->dev,
+ "TX late collisions detected, transmission may be halted on port %d\n",
+ port);
+ }
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &lue_ctrl);
if (ret)
return ret;
- if (!(FIELD_GET(PORT_INTF_SPEED_MASK, status) == PORT_INTF_SPEED_NONE) &&
- !(status & PORT_INTF_FULL_DUPLEX)) {
- /* Errata DS80000754 recommends monitoring potential faults in
- * half-duplex mode. The switch might not be able to communicate anymore
- * in these states.
- * If you see this message, please read the errata-sheet for more information:
- * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf
- * To workaround this issue, half-duplex mode should be avoided.
- * A software reset could be implemented to recover from this state.
- */
- dev_warn_once(dev->dev,
- "Half-duplex detected on port %d, transmission halt may occur\n",
- port);
- if (tx_late_col != 0) {
- /* Transmission halt with late collisions */
- dev_crit_once(dev->dev,
- "TX late collisions detected, transmission may be halted on port %d\n",
- port);
- }
- ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &status);
+ if (lue_ctrl & SW_VLAN_ENABLE) {
+ ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm);
if (ret)
return ret;
- if (status & SW_VLAN_ENABLE) {
- ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm);
- if (ret)
- return ret;
- ret = ksz_read32(dev, REG_PMAVBC, &pmavbc);
- if (ret)
- return ret;
- if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) ||
- (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) {
- /* Transmission halt with Half-Duplex and VLAN */
- dev_crit_once(dev->dev,
- "resources out of limits, transmission may be halted\n");
- }
+
+ ret = ksz_read32(dev, REG_PMAVBC, &pmavbc);
+ if (ret)
+ return ret;
+
+ if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) ||
+ (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) {
+ /* Transmission halt with Half-Duplex and VLAN */
+ dev_crit_once(dev->dev,
+ "resources out of limits, transmission may be halted\n");
}
}
+
+ return ret;
+}
+
+int ksz9477_errata_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col)
+{
+ u8 status;
+ int ret;
+
+ ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(PORT_INTF_SPEED_MASK, status) != PORT_INTF_SPEED_NONE &&
+ !(status & PORT_INTF_FULL_DUPLEX))
+ ret = ksz9477_half_duplex_monitor(dev, port, tx_late_col);
+
return ret;
}
@@ -1188,6 +1023,7 @@ void ksz9477_port_queue_split(struct ksz_device *dev, int port)
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
+ const u16 *regs = dev->info->regs;
struct dsa_switch *ds = dev->ds;
u16 data16;
u8 member;
@@ -1232,12 +1068,12 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz9477_port_acl_init(dev, port);
/* clear pending wake flags */
- ksz9477_handle_wake_reason(dev, port);
+ ksz_handle_wake_reason(dev, port);
/* Disable all WoL options by default. Otherwise
* ksz_switch_macaddr_get/put logic will not work properly.
*/
- ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, 0);
+ ksz_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], 0);
}
void ksz9477_config_cpu_port(struct dsa_switch *ds)
@@ -1334,6 +1170,7 @@ int ksz9477_enable_stp_addr(struct ksz_device *dev)
int ksz9477_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
int ret = 0;
ds->mtu_enforcement_ingress = true;
@@ -1364,13 +1201,11 @@ int ksz9477_setup(struct dsa_switch *ds)
/* enable global MIB counter freeze function */
ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
- /* Make sure PME (WoL) is not enabled. If requested, it will be
- * enabled by ksz9477_wol_pre_shutdown(). Otherwise, some PMICs do not
- * like PME events changes before shutdown.
+ /* Make sure PME (WoL) is not enabled. If requested, it will
+ * be enabled by ksz_wol_pre_shutdown(). Otherwise, some PMICs
+ * do not like PME event changes before shutdown.
*/
- ksz_write8(dev, REG_SW_PME_CTRL, 0);
-
- return 0;
+ return ksz_write8(dev, regs[REG_SW_PME_CTRL], 0);
}
u32 ksz9477_get_port_addr(int port, int offset)
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index 239a281da10b..d2166b0d881e 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -60,11 +60,6 @@ void ksz9477_switch_exit(struct ksz_device *dev);
void ksz9477_port_queue_split(struct ksz_device *dev, int port);
void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr);
void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr);
-void ksz9477_get_wol(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol);
-int ksz9477_set_wol(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol);
-void ksz9477_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled);
int ksz9477_port_acl_init(struct ksz_device *dev, int port);
void ksz9477_port_acl_free(struct ksz_device *dev, int port);
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index d5354c600ea1..04235c22bf40 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -38,11 +38,6 @@
#define SWITCH_REVISION_S 4
#define SWITCH_RESET 0x01
-#define REG_SW_PME_CTRL 0x0006
-
-#define PME_ENABLE BIT(1)
-#define PME_POLARITY BIT(0)
-
#define REG_GLOBAL_OPTIONS 0x000F
#define SW_GIGABIT_ABLE BIT(6)
@@ -807,13 +802,6 @@
#define REG_PORT_AVB_SR_1_TYPE 0x0008
#define REG_PORT_AVB_SR_2_TYPE 0x000A
-#define REG_PORT_PME_STATUS 0x0013
-#define REG_PORT_PME_CTRL 0x0017
-
-#define PME_WOL_MAGICPKT BIT(2)
-#define PME_WOL_LINKUP BIT(1)
-#define PME_WOL_ENERGY BIT(0)
-
#define REG_PORT_INT_STATUS 0x001B
#define REG_PORT_INT_MASK 0x001F
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 1491099528be..4e8710c7cb7b 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2,7 +2,7 @@
/*
* Microchip switch driver main logic
*
- * Copyright (C) 2017-2019 Microchip Technology Inc.
+ * Copyright (C) 2017-2024 Microchip Technology Inc.
*/
#include <linux/delay.h>
@@ -246,16 +246,16 @@ static const struct ksz_drive_strength ksz9477_drive_strengths[] = {
{ SW_DRIVE_STRENGTH_28MA, 28000 },
};
-/* ksz8830_drive_strengths - Drive strength mapping for KSZ8830, KSZ8873, ..
+/* ksz88x3_drive_strengths - Drive strength mapping for KSZ8863, KSZ8873, ..
* variants.
* These values are documented in the KSZ8873 and KSZ8863 datasheets.
*/
-static const struct ksz_drive_strength ksz8830_drive_strengths[] = {
+static const struct ksz_drive_strength ksz88x3_drive_strengths[] = {
{ 0, 8000 },
{ KSZ8873_DRIVE_STRENGTH_16MA, 16000 },
};
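/* A minimal sketch (assumed helper, not defined by this patch) of how
 * the tables above are meant to be consumed. Field names of
 * struct ksz_drive_strength are assumed from the initializers; the
 * in-tree lookup is ksz_drive_strength_to_reg(), per its call sites
 * further down.
 */
static int example_drive_strength_to_reg(const struct ksz_drive_strength *arr,
					 size_t n, int microamp)
{
	size_t i;

	/* Exact match only: per the table above, 8000 uA and 16000 uA
	 * are the valid requests for KSZ88x3 parts.
	 */
	for (i = 0; i < n; i++)
		if (arr[i].microamp == microamp)
			return arr[i].reg_val;

	return -EINVAL;	/* unsupported value; caller reports valid options */
}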
-static void ksz8830_phylink_mac_config(struct phylink_config *config,
+static void ksz88x3_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state);
static void ksz_phylink_mac_config(struct phylink_config *config,
@@ -265,8 +265,8 @@ static void ksz_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface);
-static const struct phylink_mac_ops ksz8830_phylink_mac_ops = {
- .mac_config = ksz8830_phylink_mac_config,
+static const struct phylink_mac_ops ksz88x3_phylink_mac_ops = {
+ .mac_config = ksz88x3_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz8_phylink_mac_link_up,
};
@@ -277,7 +277,7 @@ static const struct phylink_mac_ops ksz8_phylink_mac_ops = {
.mac_link_up = ksz8_phylink_mac_link_up,
};
-static const struct ksz_dev_ops ksz8_dev_ops = {
+static const struct ksz_dev_ops ksz88xx_dev_ops = {
.setup = ksz8_setup,
.get_port_addr = ksz8_get_port_addr,
.cfg_port_member = ksz8_cfg_port_member,
@@ -307,6 +307,44 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
.init = ksz8_switch_init,
.exit = ksz8_switch_exit,
.change_mtu = ksz8_change_mtu,
+ .pme_write8 = ksz8_pme_write8,
+ .pme_pread8 = ksz8_pme_pread8,
+ .pme_pwrite8 = ksz8_pme_pwrite8,
+};
+
+static const struct ksz_dev_ops ksz87xx_dev_ops = {
+ .setup = ksz8_setup,
+ .get_port_addr = ksz8_get_port_addr,
+ .cfg_port_member = ksz8_cfg_port_member,
+ .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
+ .port_setup = ksz8_port_setup,
+ .r_phy = ksz8_r_phy,
+ .w_phy = ksz8_w_phy,
+ .r_mib_cnt = ksz8_r_mib_cnt,
+ .r_mib_pkt = ksz8_r_mib_pkt,
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
+ .fdb_dump = ksz8_fdb_dump,
+ .fdb_add = ksz8_fdb_add,
+ .fdb_del = ksz8_fdb_del,
+ .mdb_add = ksz8_mdb_add,
+ .mdb_del = ksz8_mdb_del,
+ .vlan_filtering = ksz8_port_vlan_filtering,
+ .vlan_add = ksz8_port_vlan_add,
+ .vlan_del = ksz8_port_vlan_del,
+ .mirror_add = ksz8_port_mirror_add,
+ .mirror_del = ksz8_port_mirror_del,
+ .get_caps = ksz8_get_caps,
+ .config_cpu_port = ksz8_config_cpu_port,
+ .enable_stp_addr = ksz8_enable_stp_addr,
+ .reset = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+ .change_mtu = ksz8_change_mtu,
+ .pme_write8 = ksz8_pme_write8,
+ .pme_pread8 = ksz8_pme_pread8,
+ .pme_pwrite8 = ksz8_pme_pwrite8,
};
static void ksz9477_phylink_mac_link_up(struct phylink_config *config,
@@ -348,9 +386,9 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.mdb_add = ksz9477_mdb_add,
.mdb_del = ksz9477_mdb_del,
.change_mtu = ksz9477_change_mtu,
- .get_wol = ksz9477_get_wol,
- .set_wol = ksz9477_set_wol,
- .wol_pre_shutdown = ksz9477_wol_pre_shutdown,
+ .pme_write8 = ksz_write8,
+ .pme_pread8 = ksz_pread8,
+ .pme_pwrite8 = ksz_pwrite8,
.config_cpu_port = ksz9477_config_cpu_port,
.tc_cbs_set_cinc = ksz9477_tc_cbs_set_cinc,
.enable_stp_addr = ksz9477_enable_stp_addr,
@@ -423,6 +461,9 @@ static const u16 ksz8795_regs[] = {
[S_MULTICAST_CTRL] = 0x04,
[P_XMII_CTRL_0] = 0x06,
[P_XMII_CTRL_1] = 0x06,
+ [REG_SW_PME_CTRL] = 0x8003,
+ [REG_PORT_PME_STATUS] = 0x8003,
+ [REG_PORT_PME_CTRL] = 0x8007,
};
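/* Note on the three PME entries above: their offsets (0x8003, 0x8007)
 * lie outside the KSZ87xx's directly mapped 8-bit register space,
 * which is presumably why PME access is routed through the dedicated
 * pme_write8/pme_pread8/pme_pwrite8 ops rather than the plain
 * ksz_write8/ksz_pread8 helpers that the KSZ9477 entries use below.
 */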
static const u32 ksz8795_masks[] = {
@@ -531,6 +572,61 @@ static u8 ksz8863_shifts[] = {
[DYNAMIC_MAC_SRC_PORT] = 20,
};
+static const u16 ksz8895_regs[] = {
+ [REG_SW_MAC_ADDR] = 0x68,
+ [REG_IND_CTRL_0] = 0x6E,
+ [REG_IND_DATA_8] = 0x70,
+ [REG_IND_DATA_CHECK] = 0x72,
+ [REG_IND_DATA_HI] = 0x71,
+ [REG_IND_DATA_LO] = 0x75,
+ [REG_IND_MIB_CHECK] = 0x75,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x0C,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x0E,
+ [P_SPEED_STATUS] = 0x09,
+ [S_TAIL_TAG_CTRL] = 0x0C,
+ [P_STP_CTRL] = 0x02,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+};
+
+static const u32 ksz8895_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(7),
+ [SW_TAIL_TAG_ENABLE] = BIT(1),
+ [MIB_COUNTER_OVERFLOW] = BIT(7),
+ [MIB_COUNTER_VALID] = BIT(6),
+ [VLAN_TABLE_FID] = GENMASK(6, 0),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
+ [VLAN_TABLE_VALID] = BIT(12),
+ [STATIC_MAC_TABLE_VALID] = BIT(21),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(23),
+ [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(22),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(20, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(22, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
+};
+
+static const u8 ksz8895_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 7,
+ [VLAN_TABLE] = 13,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 24,
+ [DYNAMIC_MAC_ENTRIES_H] = 3,
+ [DYNAMIC_MAC_ENTRIES] = 29,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 27,
+ [DYNAMIC_MAC_SRC_PORT] = 24,
+};
+
static const u16 ksz9477_regs[] = {
[REG_SW_MAC_ADDR] = 0x0302,
[P_STP_CTRL] = 0x0B04,
@@ -539,6 +635,9 @@ static const u16 ksz9477_regs[] = {
[S_MULTICAST_CTRL] = 0x0331,
[P_XMII_CTRL_0] = 0x0300,
[P_XMII_CTRL_1] = 0x0301,
+ [REG_SW_PME_CTRL] = 0x0006,
+ [REG_PORT_PME_STATUS] = 0x0013,
+ [REG_PORT_PME_CTRL] = 0x0017,
};
static const u32 ksz9477_masks[] = {
@@ -1253,12 +1352,12 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.dev_name = "KSZ8795",
.num_vlans = 4096,
.num_alus = 0,
- .num_statics = 8,
+ .num_statics = 32,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
.num_ipms = 4,
- .ops = &ksz8_dev_ops,
+ .ops = &ksz87xx_dev_ops,
.phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
@@ -1294,12 +1393,12 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.dev_name = "KSZ8794",
.num_vlans = 4096,
.num_alus = 0,
- .num_statics = 8,
+ .num_statics = 32,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
.num_ipms = 4,
- .ops = &ksz8_dev_ops,
+ .ops = &ksz87xx_dev_ops,
.phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
@@ -1321,12 +1420,12 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.dev_name = "KSZ8765",
.num_vlans = 4096,
.num_alus = 0,
- .num_statics = 8,
+ .num_statics = 32,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
.num_ipms = 4,
- .ops = &ksz8_dev_ops,
+ .ops = &ksz87xx_dev_ops,
.phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
@@ -1343,8 +1442,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.internal_phy = {true, true, true, true, false},
},
- [KSZ8830] = {
- .chip_id = KSZ8830_CHIP_ID,
+ [KSZ88X3] = {
+ .chip_id = KSZ88X3_CHIP_ID,
.dev_name = "KSZ8863/KSZ8873",
.num_vlans = 16,
.num_alus = 0,
@@ -1353,8 +1452,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 3,
.num_tx_queues = 4,
.num_ipms = 4,
- .ops = &ksz8_dev_ops,
- .phylink_mac_ops = &ksz8830_phylink_mac_ops,
+ .ops = &ksz88xx_dev_ops,
+ .phylink_mac_ops = &ksz88x3_phylink_mac_ops,
.mib_names = ksz88xx_mib_names,
.mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1368,6 +1467,61 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.rd_table = &ksz8873_register_set,
},
+ [KSZ8864] = {
+ /* WARNING
+ * =======
+ * KSZ8864 is similar to KSZ8895, except the first port
+ * does not exist.
+ * external cpu
+ * KSZ8864 1,2,3 4
+ * KSZ8895 0,1,2,3 4
+ * port_cnt is configured as 5, even though only 4 ports exist.
+ */
+ .chip_id = KSZ8864_CHIP_ID,
+ .dev_name = "KSZ8864",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 32,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz88xx_dev_ops,
+ .phylink_mac_ops = &ksz88x3_phylink_mac_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8895_regs,
+ .masks = ksz8895_masks,
+ .shifts = ksz8895_shifts,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .internal_phy = {false, true, true, true, false},
+ },
+
+ [KSZ8895] = {
+ .chip_id = KSZ8895_CHIP_ID,
+ .dev_name = "KSZ8895",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 32,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz88xx_dev_ops,
+ .phylink_mac_ops = &ksz88x3_phylink_mac_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8895_regs,
+ .masks = ksz8895_masks,
+ .shifts = ksz8895_shifts,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
[KSZ9477] = {
.chip_id = KSZ9477_CHIP_ID,
.dev_name = "KSZ9477",
@@ -2570,7 +2724,7 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
struct ksz_device *dev = ds->priv;
switch (dev->chip_id) {
- case KSZ8830_CHIP_ID:
+ case KSZ88X3_CHIP_ID:
/* Silicon Errata Sheet (DS80000830A):
* Port 1 does not work with LinkMD Cable-Testing.
* Port 1 does not respond to received PAUSE control frames.
@@ -2893,12 +3047,10 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
struct ksz_device *dev = ds->priv;
enum dsa_tag_protocol proto = DSA_TAG_PROTO_NONE;
- if (dev->chip_id == KSZ8795_CHIP_ID ||
- dev->chip_id == KSZ8794_CHIP_ID ||
- dev->chip_id == KSZ8765_CHIP_ID)
+ if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev))
proto = DSA_TAG_PROTO_KSZ8795;
- if (dev->chip_id == KSZ8830_CHIP_ID ||
+ if (dev->chip_id == KSZ88X3_CHIP_ID ||
dev->chip_id == KSZ8563_CHIP_ID ||
dev->chip_id == KSZ9893_CHIP_ID ||
dev->chip_id == KSZ9563_CHIP_ID)
@@ -3010,7 +3162,9 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
case KSZ8794_CHIP_ID:
case KSZ8765_CHIP_ID:
return KSZ8795_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
- case KSZ8830_CHIP_ID:
+ case KSZ88X3_CHIP_ID:
+ case KSZ8864_CHIP_ID:
+ case KSZ8895_CHIP_ID:
return KSZ8863_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
case KSZ8563_CHIP_ID:
case KSZ8567_CHIP_ID:
@@ -3180,7 +3334,7 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
return interface;
}
-static void ksz8830_phylink_mac_config(struct phylink_config *config,
+static void ksz88x3_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
@@ -3364,10 +3518,22 @@ static int ksz_switch_detect(struct ksz_device *dev)
break;
case KSZ88_FAMILY_ID:
if (id2 == KSZ88_CHIP_ID_63)
- dev->chip_id = KSZ8830_CHIP_ID;
+ dev->chip_id = KSZ88X3_CHIP_ID;
else
return -ENODEV;
break;
+ case KSZ8895_FAMILY_ID:
+ if (id2 == KSZ8895_CHIP_ID_95 ||
+ id2 == KSZ8895_CHIP_ID_95R)
+ dev->chip_id = KSZ8895_CHIP_ID;
+ else
+ return -ENODEV;
+ ret = ksz_read8(dev, REG_KSZ8864_CHIP_ID, &id4);
+ if (ret)
+ return ret;
+ if (id4 & SW_KSZ8864)
+ dev->chip_id = KSZ8864_CHIP_ID;
+ break;
default:
ret = ksz_read32(dev, REG_CHIP_ID0, &id32);
if (ret)
@@ -3742,24 +3908,214 @@ static int ksz_setup_tc(struct dsa_switch *ds, int port,
}
}
+/**
+ * ksz_handle_wake_reason - Handle wake reason on a specified port.
+ * @dev: The device structure.
+ * @port: The port number.
+ *
+ * This function reads the PME (Power Management Event) status register of a
+ * specified port to determine the wake reason. If there is no wake event, it
+ * returns early. Otherwise, it logs the wake reason which could be due to a
+ * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status register
+ * is then cleared to acknowledge the handling of the wake event.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int ksz_handle_wake_reason(struct ksz_device *dev, int port)
+{
+ const struct ksz_dev_ops *ops = dev->dev_ops;
+ const u16 *regs = dev->info->regs;
+ u8 pme_status;
+ int ret;
+
+ ret = ops->pme_pread8(dev, port, regs[REG_PORT_PME_STATUS],
+ &pme_status);
+ if (ret)
+ return ret;
+
+ if (!pme_status)
+ return 0;
+
+ dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port,
+ pme_status & PME_WOL_MAGICPKT ? " \"Magic Packet\"" : "",
+ pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "",
+ pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : "");
+
+ return ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_STATUS],
+ pme_status);
+}
+
+/**
+ * ksz_get_wol - Get Wake-on-LAN settings for a specified port.
+ * @ds: The dsa_switch structure.
+ * @port: The port number.
+ * @wol: Pointer to ethtool Wake-on-LAN settings structure.
+ *
+ * This function checks the device's wakeup_source flag and chip ID.
+ * If WoL is enabled and supported, it reports the supported and
+ * active WoL flags.
+ */
static void ksz_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
+ u8 pme_ctrl;
+ int ret;
- if (dev->dev_ops->get_wol)
- dev->dev_ops->get_wol(dev, port, wol);
+ if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev))
+ return;
+
+ if (!dev->wakeup_source)
+ return;
+
+ wol->supported = WAKE_PHY;
+
+ /* Check if the current MAC address on this port can be set
+ * as global for WAKE_MAGIC support. The result may vary
+ * dynamically based on other ports' configurations.
+ */
+ if (ksz_is_port_mac_global_usable(dev->ds, port))
+ wol->supported |= WAKE_MAGIC;
+
+ ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL],
+ &pme_ctrl);
+ if (ret)
+ return;
+
+ if (pme_ctrl & PME_WOL_MAGICPKT)
+ wol->wolopts |= WAKE_MAGIC;
+ if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY))
+ wol->wolopts |= WAKE_PHY;
}
+/**
+ * ksz_set_wol - Set Wake-on-LAN settings for a specified port.
+ * @ds: The dsa_switch structure.
+ * @port: The port number.
+ * @wol: Pointer to ethtool Wake-on-LAN settings structure.
+ *
+ * This function configures Wake-on-LAN (WoL) settings for a specified
+ * port. It validates the provided WoL options, checks if PME is
+ * enabled and supported, clears any previous wake reasons, and sets
+ * the Magic Packet flag in the port's PME control register if
+ * specified.
+ *
+ * Return: 0 on success, or other error codes on failure.
+ */
static int ksz_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
+ u8 pme_ctrl = 0, pme_ctrl_old = 0;
struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
+ bool magic_switched_off;
+ bool magic_switched_on;
+ int ret;
- if (dev->dev_ops->set_wol)
- return dev->dev_ops->set_wol(dev, port, wol);
+ if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
- return -EOPNOTSUPP;
+ if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev))
+ return -EOPNOTSUPP;
+
+ if (!dev->wakeup_source)
+ return -EOPNOTSUPP;
+
+ ret = ksz_handle_wake_reason(dev, port);
+ if (ret)
+ return ret;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ pme_ctrl |= PME_WOL_MAGICPKT;
+ if (wol->wolopts & WAKE_PHY)
+ pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY;
+
+ ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL],
+ &pme_ctrl_old);
+ if (ret)
+ return ret;
+
+ if (pme_ctrl_old == pme_ctrl)
+ return 0;
+
+ magic_switched_off = (pme_ctrl_old & PME_WOL_MAGICPKT) &&
+ !(pme_ctrl & PME_WOL_MAGICPKT);
+ magic_switched_on = !(pme_ctrl_old & PME_WOL_MAGICPKT) &&
+ (pme_ctrl & PME_WOL_MAGICPKT);
+
+ /* To keep the MAC address reference count balanced, do this
+ * operation only when the WoL settings actually change.
+ */
+ if (magic_switched_on) {
+ ret = ksz_switch_macaddr_get(dev->ds, port, NULL);
+ if (ret)
+ return ret;
+ } else if (magic_switched_off) {
+ ksz_switch_macaddr_put(dev->ds);
+ }
+
+ ret = dev->dev_ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_CTRL],
+ pme_ctrl);
+ if (ret) {
+ if (magic_switched_on)
+ ksz_switch_macaddr_put(dev->ds);
+ return ret;
+ }
+
+ return 0;
+}
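/* Usage sketch, assuming a user port netdev named lan1 (hypothetical):
 *
 *	ethtool -s lan1 wol g	# request WAKE_MAGIC; reaches this
 *				# function through the DSA .set_wol op
 *	ethtool lan1		# read the state back via ksz_get_wol()
 */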
+
+/**
+ * ksz_wol_pre_shutdown - Prepares the switch device for shutdown while
+ * considering Wake-on-LAN (WoL) settings.
+ * @dev: The switch device structure.
+ * @wol_enabled: Pointer to a boolean which will be set to true if WoL is
+ * enabled on any port.
+ *
+ * This function prepares the switch device for a safe shutdown while taking
+ * into account the Wake-on-LAN (WoL) settings on the user ports. It updates
+ * the wol_enabled flag accordingly to reflect whether WoL is active on any
+ * port.
+ */
+static void ksz_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled)
+{
+ const struct ksz_dev_ops *ops = dev->dev_ops;
+ const u16 *regs = dev->info->regs;
+ u8 pme_pin_en = PME_ENABLE;
+ struct dsa_port *dp;
+ int ret;
+
+ *wol_enabled = false;
+
+ if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev))
+ return;
+
+ if (!dev->wakeup_source)
+ return;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ u8 pme_ctrl = 0;
+
+ ret = ops->pme_pread8(dev, dp->index,
+ regs[REG_PORT_PME_CTRL], &pme_ctrl);
+ if (!ret && pme_ctrl)
+ *wol_enabled = true;
+
+ /* make sure there are no pending wake events which would
+ * prevent the device from going to sleep/shutdown.
+ */
+ ksz_handle_wake_reason(dev, dp->index);
+ }
+
+ /* Now we are safe to enable the PME pin. */
+ if (*wol_enabled) {
+ if (dev->pme_active_high)
+ pme_pin_en |= PME_POLARITY;
+ ops->pme_write8(dev, regs[REG_SW_PME_CTRL], pme_pin_en);
+ if (ksz_is_ksz87xx(dev))
+ ksz_write8(dev, KSZ87XX_REG_INT_EN, KSZ87XX_INT_PME_MASK);
+ }
}
static int ksz_port_set_mac_address(struct dsa_switch *ds, int port,
@@ -4072,8 +4428,7 @@ void ksz_switch_shutdown(struct ksz_device *dev)
{
bool wol_enabled = false;
- if (dev->dev_ops->wol_pre_shutdown)
- dev->dev_ops->wol_pre_shutdown(dev, &wol_enabled);
+ ksz_wol_pre_shutdown(dev, &wol_enabled);
if (dev->dev_ops->reset && !wol_enabled)
dev->dev_ops->reset(dev);
@@ -4237,24 +4592,24 @@ static int ksz9477_drive_strength_write(struct ksz_device *dev,
}
/**
- * ksz8830_drive_strength_write() - Set the drive strength configuration for
- * KSZ8830 compatible chip variants.
+ * ksz88x3_drive_strength_write() - Set the drive strength configuration for
+ * KSZ8863 compatible chip variants.
* @dev: ksz device
* @props: Array of drive strength properties to be set
* @num_props: Number of properties in the array
*
- * This function applies the specified drive strength settings to KSZ8830 chip
+ * This function applies the specified drive strength settings to KSZ88X3 chip
* variants (KSZ8873, KSZ8863).
* It ensures the configurations align with what the chip variant supports and
* warns or errors out on unsupported settings.
*
* Return: 0 on success, error code otherwise
*/
-static int ksz8830_drive_strength_write(struct ksz_device *dev,
+static int ksz88x3_drive_strength_write(struct ksz_device *dev,
struct ksz_driver_strength_prop *props,
int num_props)
{
- size_t array_size = ARRAY_SIZE(ksz8830_drive_strengths);
+ size_t array_size = ARRAY_SIZE(ksz88x3_drive_strengths);
int microamp;
int i, ret;
@@ -4267,10 +4622,10 @@ static int ksz8830_drive_strength_write(struct ksz_device *dev,
}
microamp = props[KSZ_DRIVER_STRENGTH_IO].value;
- ret = ksz_drive_strength_to_reg(ksz8830_drive_strengths, array_size,
+ ret = ksz_drive_strength_to_reg(ksz88x3_drive_strengths, array_size,
microamp);
if (ret < 0) {
- ksz_drive_strength_error(dev, ksz8830_drive_strengths,
+ ksz_drive_strength_error(dev, ksz88x3_drive_strengths,
array_size, microamp);
return ret;
}
@@ -4330,8 +4685,8 @@ static int ksz_parse_drive_strength(struct ksz_device *dev)
return 0;
switch (dev->chip_id) {
- case KSZ8830_CHIP_ID:
- return ksz8830_drive_strength_write(dev, of_props,
+ case KSZ88X3_CHIP_ID:
+ return ksz88x3_drive_strength_write(dev, of_props,
ARRAY_SIZE(of_props));
case KSZ8795_CHIP_ID:
case KSZ8794_CHIP_ID:
@@ -4362,7 +4717,7 @@ static int ksz_parse_drive_strength(struct ksz_device *dev)
int ksz_switch_register(struct ksz_device *dev)
{
const struct ksz_chip_data *info;
- struct device_node *port, *ports;
+ struct device_node *ports;
phy_interface_t interface;
unsigned int port_num;
int ret;
@@ -4448,12 +4803,11 @@ int ksz_switch_register(struct ksz_device *dev)
if (!ports)
ports = of_get_child_by_name(dev->dev->of_node, "ports");
if (ports) {
- for_each_available_child_of_node(ports, port) {
+ for_each_available_child_of_node_scoped(ports, port) {
if (of_property_read_u32(port, "reg",
&port_num))
continue;
if (!(dev->port_mask & BIT(port_num))) {
- of_node_put(port);
of_node_put(ports);
return -EINVAL;
}
@@ -4475,6 +4829,8 @@ int ksz_switch_register(struct ksz_device *dev)
dev->wakeup_source = of_property_read_bool(dev->dev->of_node,
"wakeup-source");
+ dev->pme_active_high = of_property_read_bool(dev->dev->of_node,
+ "microchip,pme-active-high");
}
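/* Both flags are read from the switch's device tree node: the generic
 * "wakeup-source" property gates every WoL path above, while
 * "microchip,pme-active-high" selects the PME pin polarity that
 * ksz_wol_pre_shutdown() applies through PME_POLARITY.
 */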
ret = dsa_register_switch(dev->ds);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 5f0a628b9849..bec846e20682 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Microchip switch driver common header
*
- * Copyright (C) 2017-2019 Microchip Technology Inc.
+ * Copyright (C) 2017-2024 Microchip Technology Inc.
*/
#ifndef __KSZ_COMMON_H
@@ -174,6 +174,7 @@ struct ksz_device {
bool synclko_125;
bool synclko_disable;
bool wakeup_source;
+ bool pme_active_high;
struct vlan_table *vlan_cache;
@@ -199,7 +200,9 @@ enum ksz_model {
KSZ8795,
KSZ8794,
KSZ8765,
- KSZ8830,
+ KSZ88X3,
+ KSZ8864,
+ KSZ8895,
KSZ9477,
KSZ9896,
KSZ9897,
@@ -235,6 +238,9 @@ enum ksz_regs {
S_MULTICAST_CTRL,
P_XMII_CTRL_0,
P_XMII_CTRL_1,
+ REG_SW_PME_CTRL,
+ REG_PORT_PME_STATUS,
+ REG_PORT_PME_CTRL,
};
enum ksz_masks {
@@ -354,6 +360,11 @@ struct ksz_dev_ops {
void (*get_caps)(struct ksz_device *dev, int port,
struct phylink_config *config);
int (*change_mtu)(struct ksz_device *dev, int port, int mtu);
+ int (*pme_write8)(struct ksz_device *dev, u32 reg, u8 value);
+ int (*pme_pread8)(struct ksz_device *dev, int port, int offset,
+ u8 *data);
+ int (*pme_pwrite8)(struct ksz_device *dev, int port, int offset,
+ u8 data);
void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
void (*port_init_cnt)(struct ksz_device *dev, int port);
void (*phylink_mac_link_up)(struct ksz_device *dev, int port,
@@ -363,11 +374,6 @@ struct ksz_dev_ops {
int duplex, bool tx_pause, bool rx_pause);
void (*setup_rgmii_delay)(struct ksz_device *dev, int port);
int (*tc_cbs_set_cinc)(struct ksz_device *dev, int port, u32 val);
- void (*get_wol)(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol);
- int (*set_wol)(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol);
- void (*wol_pre_shutdown)(struct ksz_device *dev, bool *wol_enabled);
void (*config_cpu_port)(struct dsa_switch *ds);
int (*enable_stp_addr)(struct ksz_device *dev);
int (*reset)(struct ksz_device *dev);
@@ -391,6 +397,7 @@ int ksz_switch_macaddr_get(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack);
void ksz_switch_macaddr_put(struct dsa_switch *ds);
void ksz_switch_shutdown(struct ksz_device *dev);
+int ksz_handle_wake_reason(struct ksz_device *dev, int port);
/* Common register access functions */
static inline struct regmap *ksz_regmap_8(struct ksz_device *dev)
@@ -621,12 +628,29 @@ static inline bool ksz_is_ksz87xx(struct ksz_device *dev)
static inline bool ksz_is_ksz88x3(struct ksz_device *dev)
{
- return dev->chip_id == KSZ8830_CHIP_ID;
+ return dev->chip_id == KSZ88X3_CHIP_ID;
+}
+
+static inline bool ksz_is_8895_family(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ8895_CHIP_ID ||
+ dev->chip_id == KSZ8864_CHIP_ID;
}
static inline bool is_ksz8(struct ksz_device *dev)
{
- return ksz_is_ksz87xx(dev) || ksz_is_ksz88x3(dev);
+ return ksz_is_ksz87xx(dev) || ksz_is_ksz88x3(dev) ||
+ ksz_is_8895_family(dev);
+}
+
+static inline bool is_ksz88xx(struct ksz_device *dev)
+{
+ return ksz_is_ksz88x3(dev) || ksz_is_8895_family(dev);
+}
+
+static inline bool is_ksz9477(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ9477_CHIP_ID;
}
static inline int is_lan937x(struct ksz_device *dev)
@@ -655,6 +679,7 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
#define SW_FAMILY_ID_M GENMASK(15, 8)
#define KSZ87_FAMILY_ID 0x87
#define KSZ88_FAMILY_ID 0x88
+#define KSZ8895_FAMILY_ID 0x95
#define KSZ8_PORT_STATUS_0 0x08
#define KSZ8_PORT_FIBER_MODE BIT(7)
@@ -663,6 +688,12 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
#define KSZ87_CHIP_ID_94 0x6
#define KSZ87_CHIP_ID_95 0x9
#define KSZ88_CHIP_ID_63 0x3
+#define KSZ8895_CHIP_ID_95 0x4
+#define KSZ8895_CHIP_ID_95R 0x6
+
+/* KSZ8895 family detection register */
+#define REG_KSZ8864_CHIP_ID 0xFE
+#define SW_KSZ8864 BIT(7)
#define SW_REV_ID_M GENMASK(7, 4)
@@ -695,6 +726,17 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
#define P_MII_MAC_MODE BIT(2)
#define P_MII_SEL_M 0x3
+/* KSZ9477, KSZ87xx Wake-on-LAN (WoL) masks */
+#define PME_WOL_MAGICPKT BIT(2)
+#define PME_WOL_LINKUP BIT(1)
+#define PME_WOL_ENERGY BIT(0)
+
+#define PME_ENABLE BIT(1)
+#define PME_POLARITY BIT(0)
+
+#define KSZ87XX_REG_INT_EN 0x7D
+#define KSZ87XX_INT_PME_MASK BIT(4)
+
/* Interrupt */
#define REG_SW_PORT_INT_STATUS__1 0x001B
#define REG_SW_PORT_INT_MASK__1 0x001F
diff --git a/drivers/net/dsa/microchip/ksz_dcb.c b/drivers/net/dsa/microchip/ksz_dcb.c
index 086bc9b3cf53..30b4a6186e38 100644
--- a/drivers/net/dsa/microchip/ksz_dcb.c
+++ b/drivers/net/dsa/microchip/ksz_dcb.c
@@ -113,7 +113,7 @@ static void ksz_get_default_port_prio_reg(struct ksz_device *dev, int *reg,
static void ksz_get_dscp_prio_reg(struct ksz_device *dev, int *reg,
int *per_reg, u8 *mask)
{
- if (ksz_is_ksz87xx(dev)) {
+ if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev)) {
*reg = KSZ8765_REG_TOS_DSCP_CTRL;
*per_reg = 4;
*mask = GENMASK(1, 0);
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
index f0bd46e5d4ec..050f17c43ef6 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.c
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -266,7 +266,6 @@ static int ksz_ptp_enable_mode(struct ksz_device *dev)
struct ksz_port *prt;
struct dsa_port *dp;
bool tag_en = false;
- int ret;
dsa_switch_for_each_user_port(dp, dev->ds) {
prt = &dev->ports[dp->index];
@@ -277,9 +276,7 @@ static int ksz_ptp_enable_mode(struct ksz_device *dev)
}
if (tag_en) {
- ret = ptp_schedule_worker(ptp_data->clock, 0);
- if (ret)
- return ret;
+ ptp_schedule_worker(ptp_data->clock, 0);
} else {
ptp_cancel_worker_sync(ptp_data->clock);
}
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index 8e8d83213b04..1c6652f2b9fe 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -2,11 +2,11 @@
/*
* Microchip ksz series register access through SPI
*
- * Copyright (C) 2017 Microchip Technology Inc.
+ * Copyright (C) 2017-2024 Microchip Technology Inc.
* Tristram Ha <Tristram.Ha@microchip.com>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/delay.h>
#include <linux/kernel.h>
@@ -54,12 +54,15 @@ static int ksz_spi_probe(struct spi_device *spi)
if (!chip)
return -EINVAL;
- if (chip->chip_id == KSZ8830_CHIP_ID)
+ if (chip->chip_id == KSZ88X3_CHIP_ID)
regmap_config = ksz8863_regmap_config;
else if (chip->chip_id == KSZ8795_CHIP_ID ||
chip->chip_id == KSZ8794_CHIP_ID ||
chip->chip_id == KSZ8765_CHIP_ID)
regmap_config = ksz8795_regmap_config;
+ else if (chip->chip_id == KSZ8895_CHIP_ID ||
+ chip->chip_id == KSZ8864_CHIP_ID)
+ regmap_config = ksz8863_regmap_config;
else
regmap_config = ksz9477_regmap_config;
@@ -134,11 +137,19 @@ static const struct of_device_id ksz_dt_ids[] = {
},
{
.compatible = "microchip,ksz8863",
- .data = &ksz_switch_chips[KSZ8830]
+ .data = &ksz_switch_chips[KSZ88X3]
+ },
+ {
+ .compatible = "microchip,ksz8864",
+ .data = &ksz_switch_chips[KSZ8864]
},
{
.compatible = "microchip,ksz8873",
- .data = &ksz_switch_chips[KSZ8830]
+ .data = &ksz_switch_chips[KSZ88X3]
+ },
+ {
+ .compatible = "microchip,ksz8895",
+ .data = &ksz_switch_chips[KSZ8895]
},
{
.compatible = "microchip,ksz9477",
@@ -201,7 +212,9 @@ static const struct spi_device_id ksz_spi_ids[] = {
{ "ksz8794" },
{ "ksz8795" },
{ "ksz8863" },
+ { "ksz8864" },
{ "ksz8873" },
+ { "ksz8895" },
{ "ksz9477" },
{ "ksz9896" },
{ "ksz9897" },
diff --git a/drivers/net/dsa/mt7530-mmio.c b/drivers/net/dsa/mt7530-mmio.c
index b74a230a3f13..10dc49961f15 100644
--- a/drivers/net/dsa/mt7530-mmio.c
+++ b/drivers/net/dsa/mt7530-mmio.c
@@ -11,6 +11,7 @@
#include "mt7530.h"
static const struct of_device_id mt7988_of_match[] = {
+ { .compatible = "airoha,en7581-switch", .data = &mt753x_table[ID_EN7581], },
{ .compatible = "mediatek,mt7988-switch", .data = &mt753x_table[ID_MT7988], },
{ /* sentinel */ },
};
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index ec18e68bf3a8..d84ee1b419a6 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1152,7 +1152,8 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
* the MT7988 SoC. Trapped frames will be forwarded to the CPU port that
* is affine to the inbound user port.
*/
- if (priv->id == ID_MT7531 || priv->id == ID_MT7988)
+ if (priv->id == ID_MT7531 || priv->id == ID_MT7988 ||
+ priv->id == ID_EN7581)
mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port)));
/* CPU port gets connected to all user ports of
@@ -2207,7 +2208,7 @@ mt7530_setup_irq(struct mt7530_priv *priv)
return priv->irq ? : -EINVAL;
}
- if (priv->id == ID_MT7988)
+ if (priv->id == ID_MT7988 || priv->id == ID_EN7581)
priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
&mt7988_irq_domain_ops,
priv);
@@ -2438,8 +2439,10 @@ mt7530_setup(struct dsa_switch *ds)
/* Clear link settings and enable force mode to force link down
* on all ports until they're enabled later.
*/
- mt7530_rmw(priv, MT753X_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
- MT7530_FORCE_MODE, MT7530_FORCE_MODE);
+ mt7530_rmw(priv, MT753X_PMCR_P(i),
+ PMCR_LINK_SETTINGS_MASK |
+ MT753X_FORCE_MODE(priv->id),
+ MT753X_FORCE_MODE(priv->id));
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
@@ -2550,8 +2553,10 @@ mt7531_setup_common(struct dsa_switch *ds)
/* Clear link settings and enable force mode to force link down
* on all ports until they're enabled later.
*/
- mt7530_rmw(priv, MT753X_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
- MT7531_FORCE_MODE_MASK, MT7531_FORCE_MODE_MASK);
+ mt7530_rmw(priv, MT753X_PMCR_P(i),
+ PMCR_LINK_SETTINGS_MASK |
+ MT753X_FORCE_MODE(priv->id),
+ MT753X_FORCE_MODE(priv->id));
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
@@ -2783,6 +2788,28 @@ static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
}
}
+static void en7581_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ switch (port) {
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 4:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
+ break;
+
+ /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */
+ case 6:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_10000FD;
+ break;
+ }
+}
+
static void
mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
@@ -3220,6 +3247,16 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c45 = mt7531_ind_c45_phy_write,
.mac_port_get_caps = mt7988_mac_port_get_caps,
},
+ [ID_EN7581] = {
+ .id = ID_EN7581,
+ .pcs_ops = &mt7530_pcs_ops,
+ .sw_setup = mt7988_setup,
+ .phy_read_c22 = mt7531_ind_c22_phy_read,
+ .phy_write_c22 = mt7531_ind_c22_phy_write,
+ .phy_read_c45 = mt7531_ind_c45_phy_read,
+ .phy_write_c45 = mt7531_ind_c45_phy_write,
+ .mac_port_get_caps = en7581_mac_port_get_caps,
+ },
};
EXPORT_SYMBOL_GPL(mt753x_table);
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 28592123070b..6ad33a9f6b1d 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -19,6 +19,7 @@ enum mt753x_id {
ID_MT7621 = 1,
ID_MT7531 = 2,
ID_MT7988 = 3,
+ ID_EN7581 = 4,
};
#define NUM_TRGMII_CTRL 5
@@ -64,25 +65,30 @@ enum mt753x_id {
#define MT7531_CPU_PMAP(x) FIELD_PREP(MT7531_CPU_PMAP_MASK, x)
#define MT753X_MIRROR_REG(id) ((id == ID_MT7531 || \
- id == ID_MT7988) ? \
+ id == ID_MT7988 || \
+ id == ID_EN7581) ? \
MT7531_CFC : MT753X_MFC)
#define MT753X_MIRROR_EN(id) ((id == ID_MT7531 || \
- id == ID_MT7988) ? \
+ id == ID_MT7988 || \
+ id == ID_EN7581) ? \
MT7531_MIRROR_EN : MT7530_MIRROR_EN)
#define MT753X_MIRROR_PORT_MASK(id) ((id == ID_MT7531 || \
- id == ID_MT7988) ? \
+ id == ID_MT7988 || \
+ id == ID_EN7581) ? \
MT7531_MIRROR_PORT_MASK : \
MT7530_MIRROR_PORT_MASK)
#define MT753X_MIRROR_PORT_GET(id, val) ((id == ID_MT7531 || \
- id == ID_MT7988) ? \
+ id == ID_MT7988 || \
+ id == ID_EN7581) ? \
MT7531_MIRROR_PORT_GET(val) : \
MT7530_MIRROR_PORT_GET(val))
#define MT753X_MIRROR_PORT_SET(id, val) ((id == ID_MT7531 || \
- id == ID_MT7988) ? \
+ id == ID_MT7988 || \
+ id == ID_EN7581) ? \
MT7531_MIRROR_PORT_SET(val) : \
MT7530_MIRROR_PORT_SET(val))
@@ -355,6 +361,10 @@ enum mt7530_vlan_port_acc_frm {
MT7531_FORCE_MODE_TX_FC | \
MT7531_FORCE_MODE_EEE100 | \
MT7531_FORCE_MODE_EEE1G)
+#define MT753X_FORCE_MODE(id) ((id == ID_MT7531 || \
+ id == ID_MT7988) ? \
+ MT7531_FORCE_MODE_MASK : \
+ MT7530_FORCE_MODE)
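/* Per the ternary above, only ID_MT7531 and ID_MT7988 get the wider
 * MT7531_FORCE_MODE_MASK; every other ID, including the new ID_EN7581,
 * resolves to MT7530_FORCE_MODE.
 */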
#define PMCR_LINK_SETTINGS_MASK (PMCR_MAC_TX_EN | PMCR_MAC_RX_EN | \
PMCR_FORCE_EEE1G | \
PMCR_FORCE_EEE100 | \
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 5b4e2ce5470d..284270a4ade1 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -6347,7 +6347,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.invalid_port_mask = BIT(1) | BIT(2) | BIT(8),
.num_internal_phys = 5,
.internal_phys_offset = 3,
- .max_vid = 4095,
+ .max_vid = 8191,
.max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index ce3b3690c3c0..c47f068f56b3 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -457,7 +457,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
trace_mv88e6xxx_atu_full_violation(chip->dev, spid,
entry.portvec, entry.mac,
fid);
- chip->ports[spid].atu_full_violation++;
+ if (spid < ARRAY_SIZE(chip->ports))
+ chip->ports[spid].atu_full_violation++;
}
return IRQ_HANDLED;
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
index 61ab6cc4fbfc..53a6d3ed63b3 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -146,7 +146,7 @@ static int mv88e6352_g2_scratch_gpio_set_data(struct mv88e6xxx_chip *chip,
* @chip: chip private data
* @pin: gpio index
*
- * Return: 0 for output, 1 for input (same as GPIOF_DIR_XXX).
+ * Return: 0 for output, 1 for input.
*/
static int mv88e6352_g2_scratch_gpio_get_dir(struct mv88e6xxx_chip *chip,
unsigned int pin)
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index e554699f06d4..3aa9c997018a 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -61,11 +61,46 @@ static int felix_cpu_port_for_conduit(struct dsa_switch *ds,
return cpu_dp->index;
}
+/**
+ * felix_update_tag_8021q_rx_rule - Update VCAP ES0 tag_8021q rule after
+ * vlan_filtering change
+ * @outer_tagging_rule: Pointer to VCAP filter on which the update is performed
+ * @vlan_filtering: Current bridge VLAN filtering setting
+ *
+ * Source port identification for tag_8021q is done using VCAP ES0 rules on the
+ * CPU port(s). The ES0 tag B (inner tag from the packet) can be configured as
+ * either:
+ * - push_inner_tag=0: the inner tag is never pushed into the frame
+ * (and we lose info about the classified VLAN). This is
+ * good when the classified VLAN is a discardable quantity
+ * for the software RX path: it is either set to
+ * OCELOT_STANDALONE_PVID, or to
+ * ocelot_vlan_unaware_pvid(bridge).
+ * - push_inner_tag=1: the inner tag is always pushed. This is good when the
+ * classified VLAN is not a discardable quantity (the port
+ * is under a VLAN-aware bridge, and software needs to
+ * continue processing the packet in the same VLAN as the
+ * hardware).
+ * The point is that what is good for a VLAN-unaware port is not good for a
+ * VLAN-aware port, and vice versa. Thus, the RX tagging rules must be kept in
+ * sync with the VLAN filtering state of the port.
+ */
+static void
+felix_update_tag_8021q_rx_rule(struct ocelot_vcap_filter *outer_tagging_rule,
+ bool vlan_filtering)
+{
+ if (vlan_filtering)
+ outer_tagging_rule->action.push_inner_tag = OCELOT_ES0_TAG;
+ else
+ outer_tagging_rule->action.push_inner_tag = OCELOT_NO_ES0_TAG;
+}
+
/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
* the tagger can perform RX source port identification.
*/
static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port,
- int upstream, u16 vid)
+ int upstream, u16 vid,
+ bool vlan_filtering)
{
struct ocelot_vcap_filter *outer_tagging_rule;
struct ocelot *ocelot = ds->priv;
@@ -96,6 +131,14 @@ static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port,
outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
outer_tagging_rule->action.tag_a_vid_sel = 1;
outer_tagging_rule->action.vid_a_val = vid;
+ felix_update_tag_8021q_rx_rule(outer_tagging_rule, vlan_filtering);
+ outer_tagging_rule->action.tag_b_tpid_sel = OCELOT_TAG_TPID_SEL_8021Q;
+ /* Leave TAG_B_VID_SEL at 0 (Classified VID + VID_B_VAL). Since we also
+ * leave VID_B_VAL at 0, this makes ES0 tag B (the inner tag) equal to
+ * the classified VID, which we need to see in the DSA tagger's receive
+ * path. Note: the inner tag is only visible in the packet when pushed
+ * (push_inner_tag == OCELOT_ES0_TAG).
+ */
err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
if (err)
@@ -227,6 +270,7 @@ static int felix_tag_8021q_vlan_del_tx(struct dsa_switch *ds, int port, u16 vid)
static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
u16 flags)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct dsa_port *cpu_dp;
int err;
@@ -234,11 +278,12 @@ static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
* membership, which we aren't. So we don't need to add any VCAP filter
* for the CPU port.
*/
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return 0;
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
- err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);
+ err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid,
+ dsa_port_is_vlan_filtering(dp));
if (err)
return err;
}
@@ -258,10 +303,11 @@ add_tx_failed:
static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct dsa_port *cpu_dp;
int err;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return 0;
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
@@ -278,11 +324,41 @@ static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
del_tx_failed:
dsa_switch_for_each_cpu_port(cpu_dp, ds)
- felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);
+ felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid,
+ dsa_port_is_vlan_filtering(dp));
return err;
}
+static int felix_update_tag_8021q_rx_rules(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
+{
+ struct ocelot_vcap_filter *outer_tagging_rule;
+ struct ocelot_vcap_block *block_vcap_es0;
+ struct ocelot *ocelot = ds->priv;
+ struct dsa_port *cpu_dp;
+ unsigned long cookie;
+ int err;
+
+ block_vcap_es0 = &ocelot->block[VCAP_ES0];
+
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port,
+ cpu_dp->index);
+
+ outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
+ cookie, false);
+
+ felix_update_tag_8021q_rx_rule(outer_tagging_rule, vlan_filtering);
+
+ err = ocelot_vcap_filter_replace(ocelot, outer_tagging_rule);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int felix_trap_get_cpu_port(struct dsa_switch *ds,
const struct ocelot_vcap_filter *trap)
{
@@ -528,7 +604,19 @@ static int felix_tag_8021q_setup(struct dsa_switch *ds)
* so we need to be careful that there are no extra frames to be
* dequeued over MMIO, since we would never know to discard them.
*/
+ ocelot_lock_xtr_grp_bh(ocelot, 0);
ocelot_drain_cpu_queue(ocelot, 0);
+ ocelot_unlock_xtr_grp_bh(ocelot, 0);
+
+ /* Problem: when using push_inner_tag=1 for ES0 tag B, we lose info
+ * about whether the received packets were VLAN-tagged on the wire,
+ * since they are always tagged on egress towards the CPU port.
+ *
+ * Since using push_inner_tag=1 is unavoidable for VLAN-aware bridges,
+ * we must work around the fallout by untagging in software to make
+ * untagged reception work more or less as expected.
+ */
+ ds->untag_vlan_aware_bridge_pvid = true;
return 0;
}
@@ -554,6 +642,8 @@ static void felix_tag_8021q_teardown(struct dsa_switch *ds)
ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index);
dsa_tag_8021q_unregister(ds);
+
+ ds->untag_vlan_aware_bridge_pvid = false;
}
static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds)
@@ -1008,8 +1098,23 @@ static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
+ bool using_tag_8021q;
+ struct felix *felix;
+ int err;
- return ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
+ err = ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
+ if (err)
+ return err;
+
+ felix = ocelot_to_felix(ocelot);
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
+ if (using_tag_8021q) {
+ err = felix_update_tag_8021q_rx_rules(ds, port, enabled);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static int felix_vlan_add(struct dsa_switch *ds, int port,
@@ -1265,9 +1370,8 @@ static int felix_parse_ports_node(struct felix *felix,
phy_interface_t *port_phy_modes)
{
struct device *dev = felix->ocelot.dev;
- struct device_node *child;
- for_each_available_child_of_node(ports_node, child) {
+ for_each_available_child_of_node_scoped(ports_node, child) {
phy_interface_t phy_mode;
u32 port;
int err;
@@ -1276,7 +1380,6 @@ static int felix_parse_ports_node(struct felix *felix,
if (of_property_read_u32(child, "reg", &port) < 0) {
dev_err(dev, "Port number not defined in device tree "
"(property \"reg\")\n");
- of_node_put(child);
return -ENODEV;
}
@@ -1286,7 +1389,6 @@ static int felix_parse_ports_node(struct felix *felix,
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
port);
- of_node_put(child);
return -ENODEV;
}
@@ -1518,6 +1620,8 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
int port = xmit_work->dp->index;
int retries = 10;
+ ocelot_lock_inj_grp(ocelot, 0);
+
do {
if (ocelot_can_inject(ocelot, 0))
break;
@@ -1526,6 +1630,7 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
} while (--retries);
if (!retries) {
+ ocelot_unlock_inj_grp(ocelot, 0);
dev_err(ocelot->dev, "port %d failed to inject skb\n",
port);
ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
@@ -1535,6 +1640,8 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
+ ocelot_unlock_inj_grp(ocelot, 0);
+
consume_skb(skb);
kfree(xmit_work);
}
@@ -1694,6 +1801,8 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot)
if (!felix->info->quirk_no_xtr_irq)
return false;
+ ocelot_lock_xtr_grp(ocelot, grp);
+
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
unsigned int type;
@@ -1730,6 +1839,8 @@ out:
ocelot_drain_cpu_queue(ocelot, 0);
}
+ ocelot_unlock_xtr_grp(ocelot, grp);
+
return true;
}
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index ba37a566da39..0102a82e88cc 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1474,10 +1474,13 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
/* Hardware errata - Admin config cannot be overwritten if a
* config is pending; the TAS module needs to be reset
*/
- val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
- if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
- ret = -EBUSY;
- goto err_reset_tc;
+ val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
+ if (val & QSYS_TAG_CONFIG_ENABLE) {
+ val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
+ if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
+ ret = -EBUSY;
+ goto err_reset_tc;
+ }
}
ocelot_rmw_rix(ocelot,
@@ -1733,7 +1736,7 @@ struct felix_stream_gate {
u64 cycletime;
u64 cycletime_ext;
u32 num_entries;
- struct action_gate_entry entries[];
+ struct action_gate_entry entries[] __counted_by(num_entries);
};
struct felix_stream_gate_entry {
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index b9674f68b756..ad7044b295ec 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -1740,7 +1740,7 @@ static int rtl8365mb_irq_setup(struct realtek_priv *priv)
}
/* Configure chip interrupt signal polarity */
- irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ irq_trig = irq_get_trigger_type(irq);
switch (irq_trig) {
case IRQF_TRIGGER_RISING:
case IRQF_TRIGGER_HIGH:
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index 9e821b42e5f3..c7a8cd060587 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -599,7 +599,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_priv *priv)
}
/* Fetch IRQ edge information from the descriptor */
- irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ irq_trig = irq_get_trigger_type(irq);
switch (irq_trig) {
case IRQF_TRIGGER_RISING:
case IRQF_TRIGGER_HIGH:
@@ -1009,8 +1009,8 @@ static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
static int rtl8366rb_setup_leds(struct realtek_priv *priv)
{
- struct device_node *leds_np, *led_np;
struct dsa_switch *ds = &priv->ds;
+ struct device_node *leds_np;
struct dsa_port *dp;
int ret = 0;
@@ -1025,13 +1025,11 @@ static int rtl8366rb_setup_leds(struct realtek_priv *priv)
continue;
}
- for_each_child_of_node(leds_np, led_np) {
+ for_each_child_of_node_scoped(leds_np, led_np) {
ret = rtl8366rb_setup_led(priv, dp,
of_fwnode_handle(led_np));
- if (ret) {
- of_node_put(led_np);
+ if (ret)
break;
- }
}
of_node_put(leds_np);
diff --git a/drivers/net/dsa/realtek/rtl83xx.c b/drivers/net/dsa/realtek/rtl83xx.c
index 35709a1756ae..3c5018d5e1f9 100644
--- a/drivers/net/dsa/realtek/rtl83xx.c
+++ b/drivers/net/dsa/realtek/rtl83xx.c
@@ -185,11 +185,9 @@ rtl83xx_probe(struct device *dev,
/* TODO: if power is software controlled, set up any regulators here */
priv->reset_ctl = devm_reset_control_get_optional(dev, NULL);
- if (IS_ERR(priv->reset_ctl)) {
- ret = PTR_ERR(priv->reset_ctl);
- dev_err_probe(dev, ret, "failed to get reset control\n");
- return ERR_CAST(priv->reset_ctl);
- }
+ if (IS_ERR(priv->reset_ctl))
+ return dev_err_cast_probe(dev, priv->reset_ctl,
+ "failed to get reset control\n");
priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(priv->reset)) {
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index c7282ce3d11c..d0563ef59acf 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1188,9 +1188,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
struct device_node *ports_node)
{
struct device *dev = &priv->spidev->dev;
- struct device_node *child;
- for_each_available_child_of_node(ports_node, child) {
+ for_each_available_child_of_node_scoped(ports_node, child) {
struct device_node *phy_node;
phy_interface_t phy_mode;
u32 index;
@@ -1200,7 +1199,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
if (of_property_read_u32(child, "reg", &index) < 0) {
dev_err(dev, "Port number not defined in device tree "
"(property \"reg\")\n");
- of_node_put(child);
return -ENODEV;
}
@@ -1210,7 +1208,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
index);
- of_node_put(child);
return -ENODEV;
}
@@ -1219,7 +1216,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
if (!of_phy_is_fixed_link(child)) {
dev_err(dev, "phy-handle or fixed-link "
"properties missing!\n");
- of_node_put(child);
return -ENODEV;
}
/* phy-handle is missing, but fixed-link isn't.
@@ -1233,10 +1229,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
priv->phy_mode[index] = phy_mode;
err = sja1105_parse_rgmii_delays(priv, index, child);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
return 0;
@@ -3164,7 +3158,6 @@ static int sja1105_setup(struct dsa_switch *ds)
* TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
*/
ds->vlan_filtering_is_global = true;
- ds->untag_bridge_pvid = true;
ds->fdb_isolation = true;
ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES;
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index d9d3e30fd47a..f18aa321053d 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -21,6 +21,7 @@
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
@@ -36,11 +37,17 @@
#define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */
#define VSC73XX_BLOCK_MII 0x3 /* Subblocks 0 and 1 */
#define VSC73XX_BLOCK_MEMINIT 0x3 /* Only subblock 2 */
-#define VSC73XX_BLOCK_CAPTURE 0x4 /* Only subblock 2 */
+#define VSC73XX_BLOCK_CAPTURE 0x4 /* Subblocks 0-4, 6, 7 */
#define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */
#define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */
+/* MII Block subblock */
+#define VSC73XX_BLOCK_MII_INTERNAL 0x0 /* Internal MDIO subblock */
+#define VSC73XX_BLOCK_MII_EXTERNAL 0x1 /* External MDIO subblock */
+
#define CPU_PORT 6 /* CPU port */
+#define VSC73XX_NUM_FDB_ROWS 2048
+#define VSC73XX_NUM_BUCKETS 4
/* MAC Block registers */
#define VSC73XX_MAC_CFG 0x00
@@ -192,6 +199,40 @@
#define VSC73XX_SRCMASKS_MIRROR BIT(26)
#define VSC73XX_SRCMASKS_PORTS_MASK GENMASK(7, 0)
+#define VSC73XX_MACHDATA_VID GENMASK(27, 16)
+#define VSC73XX_MACHDATA_MAC0 GENMASK(15, 8)
+#define VSC73XX_MACHDATA_MAC1 GENMASK(7, 0)
+#define VSC73XX_MACLDATA_MAC2 GENMASK(31, 24)
+#define VSC73XX_MACLDATA_MAC3 GENMASK(23, 16)
+#define VSC73XX_MACLDATA_MAC4 GENMASK(15, 8)
+#define VSC73XX_MACLDATA_MAC5 GENMASK(7, 0)
+
+#define VSC73XX_HASH0_VID_FROM_MASK GENMASK(5, 0)
+#define VSC73XX_HASH0_MAC0_FROM_MASK GENMASK(7, 4)
+#define VSC73XX_HASH1_MAC0_FROM_MASK GENMASK(3, 0)
+#define VSC73XX_HASH1_MAC1_FROM_MASK GENMASK(7, 1)
+#define VSC73XX_HASH2_MAC1_FROM_MASK BIT(0)
+#define VSC73XX_HASH2_MAC2_FROM_MASK GENMASK(7, 0)
+#define VSC73XX_HASH2_MAC3_FROM_MASK GENMASK(7, 6)
+#define VSC73XX_HASH3_MAC3_FROM_MASK GENMASK(5, 0)
+#define VSC73XX_HASH3_MAC4_FROM_MASK GENMASK(7, 3)
+#define VSC73XX_HASH4_MAC4_FROM_MASK GENMASK(2, 0)
+
+#define VSC73XX_HASH0_VID_TO_MASK GENMASK(9, 4)
+#define VSC73XX_HASH0_MAC0_TO_MASK GENMASK(3, 0)
+#define VSC73XX_HASH1_MAC0_TO_MASK GENMASK(10, 7)
+#define VSC73XX_HASH1_MAC1_TO_MASK GENMASK(6, 0)
+#define VSC73XX_HASH2_MAC1_TO_MASK BIT(10)
+#define VSC73XX_HASH2_MAC2_TO_MASK GENMASK(9, 2)
+#define VSC73XX_HASH2_MAC3_TO_MASK GENMASK(1, 0)
+#define VSC73XX_HASH3_MAC3_TO_MASK GENMASK(10, 5)
+#define VSC73XX_HASH3_MAC4_TO_MASK GENMASK(4, 0)
+#define VSC73XX_HASH4_MAC4_TO_MASK GENMASK(10, 8)
+
+#define VSC73XX_MACTINDX_SHADOW BIT(13)
+#define VSC73XX_MACTINDX_BUCKET_MSK GENMASK(12, 11)
+#define VSC73XX_MACTINDX_INDEX_MSK GENMASK(10, 0)
+
#define VSC73XX_MACACCESS_CPU_COPY BIT(14)
#define VSC73XX_MACACCESS_FWD_KILL BIT(13)
#define VSC73XX_MACACCESS_IGNORE_VLAN BIT(12)
@@ -221,9 +262,29 @@
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE 3
/* MII block 3 registers */
-#define VSC73XX_MII_STAT 0x0
-#define VSC73XX_MII_CMD 0x1
-#define VSC73XX_MII_DATA 0x2
+#define VSC73XX_MII_STAT 0x0
+#define VSC73XX_MII_CMD 0x1
+#define VSC73XX_MII_DATA 0x2
+#define VSC73XX_MII_MPRES 0x3
+
+#define VSC73XX_MII_STAT_BUSY BIT(3)
+#define VSC73XX_MII_STAT_READ BIT(2)
+#define VSC73XX_MII_STAT_WRITE BIT(1)
+
+#define VSC73XX_MII_CMD_SCAN BIT(27)
+#define VSC73XX_MII_CMD_OPERATION BIT(26)
+#define VSC73XX_MII_CMD_PHY_ADDR GENMASK(25, 21)
+#define VSC73XX_MII_CMD_PHY_REG GENMASK(20, 16)
+#define VSC73XX_MII_CMD_WRITE_DATA GENMASK(15, 0)
+
+#define VSC73XX_MII_DATA_FAILURE BIT(16)
+#define VSC73XX_MII_DATA_READ_DATA GENMASK(15, 0)
+
+#define VSC73XX_MII_MPRES_NOPREAMBLE BIT(6)
+#define VSC73XX_MII_MPRES_PRESCALEVAL GENMASK(5, 0)
+#define VSC73XX_MII_PRESCALEVAL_MIN 3 /* min allowed mdio clock prescaler */
+
/* Arbiter block 5 registers */
#define VSC73XX_ARBEMPTY 0x0c
@@ -299,6 +360,7 @@
#define IS_739X(a) (IS_7395(a) || IS_7398(a))
#define VSC73XX_POLL_SLEEP_US 1000
+#define VSC73XX_MDIO_POLL_SLEEP_US 5
#define VSC73XX_POLL_TIMEOUT_US 10000
struct vsc73xx_counter {
@@ -306,6 +368,13 @@ struct vsc73xx_counter {
const char *name;
};
+struct vsc73xx_fdb {
+ u16 vid;
+ u8 port;
+ u8 mac[ETH_ALEN];
+ bool valid;
+};
+
/* Counters are named according to the MIB standards where applicable.
* Some counters are custom, non-standard. The standard counters are
* named in accordance with RFC2819, RFC2021 and IEEE Std 802.3-2002 Annex
@@ -403,13 +472,19 @@ int vsc73xx_is_addr_valid(u8 block, u8 subblock)
break;
case VSC73XX_BLOCK_MII:
- case VSC73XX_BLOCK_CAPTURE:
case VSC73XX_BLOCK_ARBITER:
switch (subblock) {
case 0 ... 1:
return 1;
}
break;
+ case VSC73XX_BLOCK_CAPTURE:
+ switch (subblock) {
+ case 0 ... 4:
+ case 6 ... 7:
+ return 1;
+ }
+ break;
}
return 0;
@@ -527,6 +602,22 @@ static int vsc73xx_detect(struct vsc73xx *vsc)
return 0;
}
+static int vsc73xx_mdio_busy_check(struct vsc73xx *vsc)
+{
+ int ret, err;
+ u32 val;
+
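+	/* Poll VSC73XX_MII_STAT until the BUSY bit clears; "ret" reports a
+	 * polling timeout while "err" carries the last vsc73xx_read() status.
+	 */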
+ ret = read_poll_timeout(vsc73xx_read, err,
+ err < 0 || !(val & VSC73XX_MII_STAT_BUSY),
+ VSC73XX_MDIO_POLL_SLEEP_US,
+ VSC73XX_POLL_TIMEOUT_US, false, vsc,
+ VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_STAT, &val);
+ if (ret)
+ return ret;
+ return err;
+}
+
static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
{
struct vsc73xx *vsc = ds->priv;
@@ -534,21 +625,33 @@ static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
u32 val;
int ret;
+ ret = vsc73xx_mdio_busy_check(vsc);
+ if (ret)
+ return ret;
+
/* Setting bit 26 means "read" */
- cmd = BIT(26) | (phy << 21) | (regnum << 16);
- ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ cmd = VSC73XX_MII_CMD_OPERATION |
+ FIELD_PREP(VSC73XX_MII_CMD_PHY_ADDR, phy) |
+ FIELD_PREP(VSC73XX_MII_CMD_PHY_REG, regnum);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_CMD, cmd);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_mdio_busy_check(vsc);
if (ret)
return ret;
- msleep(2);
- ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val);
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_DATA, &val);
if (ret)
return ret;
- if (val & BIT(16)) {
+ if (val & VSC73XX_MII_DATA_FAILURE) {
dev_err(vsc->dev, "reading reg %02x from phy%d failed\n",
regnum, phy);
return -EIO;
}
- val &= 0xFFFFU;
+ val &= VSC73XX_MII_DATA_READ_DATA;
dev_dbg(vsc->dev, "read reg %02x from phy%d = %04x\n",
regnum, phy, val);
@@ -563,19 +666,15 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
u32 cmd;
int ret;
- /* It was found through tedious experiments that this router
- * chip really hates to have it's PHYs reset. They
- * never recover if that happens: autonegotiation stops
- * working after a reset. Just filter out this command.
- * (Resetting the whole chip is OK.)
- */
- if (regnum == 0 && (val & BIT(15))) {
- dev_info(vsc->dev, "reset PHY - disallowed\n");
- return 0;
- }
+ ret = vsc73xx_mdio_busy_check(vsc);
+ if (ret)
+ return ret;
- cmd = (phy << 21) | (regnum << 16);
- ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ cmd = FIELD_PREP(VSC73XX_MII_CMD_PHY_ADDR, phy) |
+ FIELD_PREP(VSC73XX_MII_CMD_PHY_REG, regnum) |
+ FIELD_PREP(VSC73XX_MII_CMD_WRITE_DATA, val);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_CMD, cmd);
if (ret)
return ret;
@@ -684,15 +783,76 @@ vsc73xx_update_vlan_table(struct vsc73xx *vsc, int port, u16 vid, bool set)
return vsc73xx_write_vlan_table_entry(vsc, vid, portmap);
}
+static int vsc73xx_configure_rgmii_port_delay(struct dsa_switch *ds)
+{
+ /* Keep the 2.0 ns delay for backward compatibility */
+ u32 tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS;
+ u32 rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS;
+ struct dsa_port *dp = dsa_to_port(ds, CPU_PORT);
+ struct device_node *port_dn = dp->dn;
+ struct vsc73xx *vsc = ds->priv;
+ u32 delay;
+
+ if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay)) {
+ switch (delay) {
+ case 0:
+ tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_NONE;
+ break;
+ case 1400:
+ tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_4_NS;
+ break;
+ case 1700:
+ tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_7_NS;
+ break;
+ case 2000:
+ break;
+ default:
+ dev_err(vsc->dev,
+ "Unsupported RGMII Transmit Clock Delay\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_dbg(vsc->dev,
+ "RGMII Transmit Clock Delay isn't configured, set to 2.0 ns\n");
+ }
+
+ if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay)) {
+ switch (delay) {
+ case 0:
+ rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_NONE;
+ break;
+ case 1400:
+ rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_4_NS;
+ break;
+ case 1700:
+ rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_7_NS;
+ break;
+ case 2000:
+ break;
+ default:
+ dev_err(vsc->dev,
+ "Unsupported RGMII Receive Clock Delay value\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_dbg(vsc->dev,
+ "RGMII Receive Clock Delay isn't configured, set to 2.0 ns\n");
+ }
+
+ /* MII delay, set both GTX and RX delay */
+ return vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
+ tx_delay | rx_delay);
+}
+
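/* An illustrative device tree fragment (not from this patch) for the CPU
 * port, using the standard tx-internal-delay-ps/rx-internal-delay-ps
 * properties parsed above; the switch statements accept 0, 1400, 1700 and
 * 2000 ps:
 *
 *	port@6 {
 *		reg = <6>;
 *		tx-internal-delay-ps = <2000>;
 *		rx-internal-delay-ps = <2000>;
 *	};
 */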
static int vsc73xx_setup(struct dsa_switch *ds)
{
struct vsc73xx *vsc = ds->priv;
- int i, ret;
+ int i, ret, val;
dev_info(vsc->dev, "set up the switch\n");
- ds->untag_bridge_pvid = true;
ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES;
+ ds->fdb_isolation = true;
/* Issue RESET */
vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
@@ -746,10 +906,11 @@ static int vsc73xx_setup(struct dsa_switch *ds)
VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET);
}
- /* MII delay, set both GTX and RX delay to 2 ns */
- vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
- VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS |
- VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS);
+ /* Configure RGMII delay */
+ ret = vsc73xx_configure_rgmii_port_delay(ds);
+ if (ret)
+ return ret;
+
/* Ingress VLAN reception mask (table 145) */
vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANMASK,
0xff);
@@ -759,6 +920,15 @@ static int vsc73xx_setup(struct dsa_switch *ds)
mdelay(50);
+ /* Disable preamble and use the maximum allowed clock for the internal
+ * MDIO bus, used for communication with internal PHYs only.
+ */
+ val = VSC73XX_MII_MPRES_NOPREAMBLE |
+ FIELD_PREP(VSC73XX_MII_MPRES_PRESCALEVAL,
+ VSC73XX_MII_PRESCALEVAL_MIN);
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_MPRES, val);
+
/* Release reset from the internal PHYs */
vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
VSC73XX_GLORESET_PHY_RESET);
@@ -957,6 +1127,11 @@ static void vsc73xx_mac_link_up(struct phylink_config *config,
if (duplex == DUPLEX_FULL)
val |= VSC73XX_MAC_CFG_FDX;
+ else
+ /* Per the datasheet description ("Port Mode Procedure" in
+ * 5.6.2), this bit is configured only for half duplex.
+ */
+ val |= VSC73XX_MAC_CFG_WEXC_DIS;
/* This routine is described in the datasheet (below ARBDISC register
* description)
@@ -967,7 +1142,6 @@ static void vsc73xx_mac_link_up(struct phylink_config *config,
get_random_bytes(&seed, 1);
val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET;
val |= VSC73XX_MAC_CFG_SEED_LOAD;
- val |= VSC73XX_MAC_CFG_WEXC_DIS;
/* Those bits are responsible for MTU only. Kernel takes care about MTU,
* let's enable +8 bytes frame length unconditionally.
@@ -1729,6 +1903,312 @@ static void vsc73xx_port_stp_state_set(struct dsa_switch *ds, int port,
vsc73xx_refresh_fwd_map(ds, port, state);
}
+static u16 vsc73xx_calc_hash(const unsigned char *addr, u16 vid)
+{
+ /* VID 5-0, MAC 47-44 */
+ u16 hash = FIELD_PREP(VSC73XX_HASH0_VID_TO_MASK,
+ FIELD_GET(VSC73XX_HASH0_VID_FROM_MASK, vid)) |
+ FIELD_PREP(VSC73XX_HASH0_MAC0_TO_MASK,
+ FIELD_GET(VSC73XX_HASH0_MAC0_FROM_MASK, addr[0]));
+ /* MAC 43-33 */
+ hash ^= FIELD_PREP(VSC73XX_HASH1_MAC0_TO_MASK,
+ FIELD_GET(VSC73XX_HASH1_MAC0_FROM_MASK, addr[0])) |
+ FIELD_PREP(VSC73XX_HASH1_MAC1_TO_MASK,
+ FIELD_GET(VSC73XX_HASH1_MAC1_FROM_MASK, addr[1]));
+ /* MAC 32-22 */
+ hash ^= FIELD_PREP(VSC73XX_HASH2_MAC1_TO_MASK,
+ FIELD_GET(VSC73XX_HASH2_MAC1_FROM_MASK, addr[1])) |
+ FIELD_PREP(VSC73XX_HASH2_MAC2_TO_MASK,
+ FIELD_GET(VSC73XX_HASH2_MAC2_FROM_MASK, addr[2])) |
+ FIELD_PREP(VSC73XX_HASH2_MAC3_TO_MASK,
+ FIELD_GET(VSC73XX_HASH2_MAC3_FROM_MASK, addr[3]));
+ /* MAC 21-11 */
+ hash ^= FIELD_PREP(VSC73XX_HASH3_MAC3_TO_MASK,
+ FIELD_GET(VSC73XX_HASH3_MAC3_FROM_MASK, addr[3])) |
+ FIELD_PREP(VSC73XX_HASH3_MAC4_TO_MASK,
+ FIELD_GET(VSC73XX_HASH3_MAC4_FROM_MASK, addr[4]));
+ /* MAC 10-0 */
+ hash ^= FIELD_PREP(VSC73XX_HASH4_MAC4_TO_MASK,
+ FIELD_GET(VSC73XX_HASH4_MAC4_FROM_MASK, addr[4])) |
+ addr[5];
+
+ return hash;
+}
+
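/* A sanity-check sketch (not part of the patch): the FIELD_PREP/FIELD_GET
 * chain above is equivalent to XOR-folding the 54-bit key {vid[5:0],
 * mac[47:0]} in 11-bit slices, which may be easier to verify by hand:
 */
static u16 vsc73xx_calc_hash_folded(const unsigned char *addr, u16 vid)
{
	u64 key = ((u64)(vid & 0x3f) << 48) |
		  ((u64)addr[0] << 40) | ((u64)addr[1] << 32) |
		  ((u64)addr[2] << 24) | ((u64)addr[3] << 16) |
		  ((u64)addr[4] << 8) | addr[5];
	u16 hash = 0;

	/* Fold 11 bits at a time; the topmost slice is only 10 bits wide */
	while (key) {
		hash ^= (u16)(key & GENMASK(10, 0));
		key >>= 11;
	}

	return hash;
}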
+static int
+vsc73xx_port_wait_for_mac_table_cmd(struct vsc73xx *vsc)
+{
+ int ret, err;
+ u32 val;
+
+ ret = read_poll_timeout(vsc73xx_read, err,
+ err < 0 ||
+ ((val & VSC73XX_MACACCESS_CMD_MASK) ==
+ VSC73XX_MACACCESS_CMD_IDLE),
+ VSC73XX_POLL_SLEEP_US, VSC73XX_POLL_TIMEOUT_US,
+ false, vsc, VSC73XX_BLOCK_ANALYZER,
+ 0, VSC73XX_MACACCESS, &val);
+ if (ret)
+ return ret;
+ return err;
+}
+
+static int vsc73xx_port_read_mac_table_row(struct vsc73xx *vsc, u16 index,
+ struct vsc73xx_fdb *fdb)
+{
+ int ret, i;
+ u32 val;
+
+ if (!fdb)
+ return -EINVAL;
+ if (index >= VSC73XX_NUM_FDB_ROWS)
+ return -EINVAL;
+
+ for (i = 0; i < VSC73XX_NUM_BUCKETS; i++) {
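+		/* Select the row and bucket to read; per the MACTINDX layout
+		 * above, the SHADOW bit is only set when addressing bucket 0.
+		 */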
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACTINDX,
+ (i ? 0 : VSC73XX_MACTINDX_SHADOW) |
+ FIELD_PREP(VSC73XX_MACTINDX_BUCKET_MSK, i) |
+ index);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_port_wait_for_mac_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACACCESS,
+ VSC73XX_MACACCESS_CMD_MASK,
+ VSC73XX_MACACCESS_CMD_READ_ENTRY);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_port_wait_for_mac_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACACCESS, &val);
+ if (ret)
+ return ret;
+
+ fdb[i].valid = FIELD_GET(VSC73XX_MACACCESS_VALID, val);
+ if (!fdb[i].valid)
+ continue;
+
+ fdb[i].port = FIELD_GET(VSC73XX_MACACCESS_DEST_IDX_MASK, val);
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACHDATA, &val);
+ if (ret)
+ return ret;
+
+ fdb[i].vid = FIELD_GET(VSC73XX_MACHDATA_VID, val);
+ fdb[i].mac[0] = FIELD_GET(VSC73XX_MACHDATA_MAC0, val);
+ fdb[i].mac[1] = FIELD_GET(VSC73XX_MACHDATA_MAC1, val);
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACLDATA, &val);
+ if (ret)
+ return ret;
+
+ fdb[i].mac[2] = FIELD_GET(VSC73XX_MACLDATA_MAC2, val);
+ fdb[i].mac[3] = FIELD_GET(VSC73XX_MACLDATA_MAC3, val);
+ fdb[i].mac[4] = FIELD_GET(VSC73XX_MACLDATA_MAC4, val);
+ fdb[i].mac[5] = FIELD_GET(VSC73XX_MACLDATA_MAC5, val);
+ }
+
+ return ret;
+}
+
+static int
+vsc73xx_fdb_operation(struct vsc73xx *vsc, const unsigned char *addr, u16 vid,
+ u16 hash, u16 cmd_mask, u16 cmd_val)
+{
+ int ret;
+ u32 val;
+
+ val = FIELD_PREP(VSC73XX_MACHDATA_VID, vid) |
+ FIELD_PREP(VSC73XX_MACHDATA_MAC0, addr[0]) |
+ FIELD_PREP(VSC73XX_MACHDATA_MAC1, addr[1]);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_MACHDATA,
+ val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(VSC73XX_MACLDATA_MAC2, addr[2]) |
+ FIELD_PREP(VSC73XX_MACLDATA_MAC3, addr[3]) |
+ FIELD_PREP(VSC73XX_MACLDATA_MAC4, addr[4]) |
+ FIELD_PREP(VSC73XX_MACLDATA_MAC5, addr[5]);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_MACLDATA,
+ val);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_MACTINDX,
+ hash);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_port_wait_for_mac_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACACCESS, cmd_mask, cmd_val);
+ if (ret)
+ return ret;
+
+ return vsc73xx_port_wait_for_mac_table_cmd(vsc);
+}
+
+static int vsc73xx_fdb_del_entry(struct vsc73xx *vsc, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct vsc73xx_fdb fdb[VSC73XX_NUM_BUCKETS];
+ u16 hash = vsc73xx_calc_hash(addr, vid);
+ int bucket, ret;
+
+ mutex_lock(&vsc->fdb_lock);
+
+ ret = vsc73xx_port_read_mac_table_row(vsc, hash, fdb);
+ if (ret)
+ goto err;
+
+ for (bucket = 0; bucket < VSC73XX_NUM_BUCKETS; bucket++) {
+ if (fdb[bucket].valid && fdb[bucket].port == port &&
+ ether_addr_equal(addr, fdb[bucket].mac))
+ break;
+ }
+
+ if (bucket == VSC73XX_NUM_BUCKETS) {
+ /* Can't find MAC in MAC table */
+ ret = -ENODATA;
+ goto err;
+ }
+
+ ret = vsc73xx_fdb_operation(vsc, addr, vid, hash,
+ VSC73XX_MACACCESS_CMD_MASK,
+ VSC73XX_MACACCESS_CMD_FORGET);
+err:
+ mutex_unlock(&vsc->fdb_lock);
+ return ret;
+}
+
+static int vsc73xx_fdb_add_entry(struct vsc73xx *vsc, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct vsc73xx_fdb fdb[VSC73XX_NUM_BUCKETS];
+ u16 hash = vsc73xx_calc_hash(addr, vid);
+ int bucket, ret;
+ u32 val;
+
+ mutex_lock(&vsc->fdb_lock);
+
+ ret = vsc73xx_port_read_mac_table_row(vsc, hash, fdb);
+ if (ret)
+ goto err;
+
+ for (bucket = 0; bucket < VSC73XX_NUM_BUCKETS; bucket++) {
+ if (!fdb[bucket].valid)
+ break;
+ }
+
+ if (bucket == VSC73XX_NUM_BUCKETS) {
+ /* All buckets in this row are in use */
+ ret = -EOVERFLOW;
+ goto err;
+ }
+
+ val = VSC73XX_MACACCESS_VALID | VSC73XX_MACACCESS_LOCKED |
+ FIELD_PREP(VSC73XX_MACACCESS_DEST_IDX_MASK, port) |
+ VSC73XX_MACACCESS_CMD_LEARN;
+ ret = vsc73xx_fdb_operation(vsc, addr, vid, hash,
+ VSC73XX_MACACCESS_VALID |
+ VSC73XX_MACACCESS_LOCKED |
+ VSC73XX_MACACCESS_DEST_IDX_MASK |
+ VSC73XX_MACACCESS_CMD_MASK, val);
+err:
+ mutex_unlock(&vsc->fdb_lock);
+ return ret;
+}
+
+static int vsc73xx_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return vsc73xx_fdb_add_entry(vsc, port, addr, vid);
+}
+
+static int vsc73xx_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return vsc73xx_fdb_del_entry(vsc, port, addr, vid);
+}
+
+static int vsc73xx_port_fdb_dump(struct dsa_switch *ds,
+ int port, dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct vsc73xx_fdb fdb[VSC73XX_NUM_BUCKETS];
+ struct vsc73xx *vsc = ds->priv;
+ u16 i, bucket;
+ int err = 0;
+
+ mutex_lock(&vsc->fdb_lock);
+
+ for (i = 0; i < VSC73XX_NUM_FDB_ROWS; i++) {
+ err = vsc73xx_port_read_mac_table_row(vsc, i, fdb);
+ if (err)
+ goto unlock;
+
+ for (bucket = 0; bucket < VSC73XX_NUM_BUCKETS; bucket++) {
+ if (!fdb[bucket].valid || fdb[bucket].port != port)
+ continue;
+
+ /* We need to hide dsa_8021q VLANs from the user */
+ if (vid_is_dsa_8021q(fdb[bucket].vid))
+ fdb[bucket].vid = 0;
+
+ err = cb(fdb[bucket].mac, fdb[bucket].vid, false, data);
+ if (err)
+ goto unlock;
+ }
+ }
+unlock:
+ mutex_unlock(&vsc->fdb_lock);
+ return err;
+}
+
static const struct phylink_mac_ops vsc73xx_phylink_mac_ops = {
.mac_config = vsc73xx_mac_config,
.mac_link_down = vsc73xx_mac_link_down,
@@ -1751,6 +2231,9 @@ static const struct dsa_switch_ops vsc73xx_ds_ops = {
.port_bridge_join = dsa_tag_8021q_bridge_join,
.port_bridge_leave = dsa_tag_8021q_bridge_leave,
.port_change_mtu = vsc73xx_change_mtu,
+ .port_fdb_add = vsc73xx_fdb_add,
+ .port_fdb_del = vsc73xx_fdb_del,
+ .port_fdb_dump = vsc73xx_port_fdb_dump,
.port_max_mtu = vsc73xx_get_max_mtu,
.port_stp_state_set = vsc73xx_port_stp_state_set,
.port_vlan_filtering = vsc73xx_port_vlan_filtering,
@@ -1881,6 +2364,8 @@ int vsc73xx_probe(struct vsc73xx *vsc)
return -ENODEV;
}
+ mutex_init(&vsc->fdb_lock);
+
eth_random_addr(vsc->addr);
dev_info(vsc->dev,
"MAC for control frames: %02X:%02X:%02X:%02X:%02X:%02X\n",
diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h
index 3ca579acc798..3c30e143c14f 100644
--- a/drivers/net/dsa/vitesse-vsc73xx.h
+++ b/drivers/net/dsa/vitesse-vsc73xx.h
@@ -45,6 +45,7 @@ struct vsc73xx_portinfo {
* @vlans: List of configured vlans. Contains port mask and untagged status of
* every vlan configured in port vlan operation. It doesn't cover tag_8021q
* vlans.
+ * @fdb_lock: Mutex protecting access to the FDB
*/
struct vsc73xx {
struct device *dev;
@@ -57,6 +58,7 @@ struct vsc73xx {
void *priv;
struct vsc73xx_portinfo portinfo[VSC73XX_MAX_NUM_PORTS];
struct list_head vlans;
+ struct mutex fdb_lock;
};
/**
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index d29b5d7af0d7..e9c5e1e11fa0 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -109,9 +109,10 @@ static void dummy_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
+ dev->lltx = true;
dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
+ dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
dev->features |= NETIF_F_GSO_ENCAP_ALL;
dev->hw_features |= dev->features;
dev->hw_enc_features |= dev->features;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 0baac25db4f8..9a542e3c9b05 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -158,6 +158,17 @@ config ETHOC
help
Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
+config OA_TC6
+ tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support"
+ depends on SPI
+ select PHYLIB
+ help
+ This library implements the OPEN Alliance TC6 10BASE-T1x MAC-PHY
+ Serial Interface protocol for 10BASE-T1x MAC-PHYs.
+
+ For implementation details, refer to the documentation in
+ <file:Documentation/networking/oa-tc6-framework.rst>.
+
source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
source "drivers/net/ethernet/pensando/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c03203439c0e..99fa180dedb8 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -105,3 +105,4 @@ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/
+obj-$(CONFIG_OA_TC6) += oa_tc6.o
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 0713f1e2c7f3..68fad5575fd4 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -26,7 +26,7 @@
#include <net/switchdev.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#define ADIN1110_PHY_ID 0x1
@@ -318,11 +318,11 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
* from the ADIN1110 frame header.
*/
if (frame_size < ADIN1110_FRAME_HEADER_LEN + ADIN1110_FEC_LEN)
- return ret;
+ return -EINVAL;
round_len = adin1110_round_len(frame_size);
if (round_len < 0)
- return ret;
+ return -EINVAL;
frame_size_no_fcs = frame_size - ADIN1110_FRAME_HEADER_LEN - ADIN1110_FEC_LEN;
memset(priv->data, 0, ADIN1110_RD_HEADER_LEN);
@@ -1599,7 +1599,7 @@ static int adin1110_probe_netdevs(struct adin1110_priv *priv)
netdev->netdev_ops = &adin1110_netdev_ops;
netdev->ethtool_ops = &adin1110_ethtool_ops;
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->features |= NETIF_F_NETNS_LOCAL;
+ netdev->netns_local = true;
port_priv->phydev = get_phy_device(priv->mii_bus, i + 1, false);
if (IS_ERR(port_priv->phydev)) {
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 27af7746d645..adf6f67c5fcb 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -484,7 +484,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb->len > MAX_FRAME_SIZE)) {
dev->stats.tx_errors++;
- goto out;
+ goto len_error;
}
/* Save skb pointer. */
@@ -575,6 +575,7 @@ frag_map_error:
map_error:
if (net_ratelimit())
dev_warn(greth->dev, "Could not create TX DMA mapping\n");
+len_error:
dev_kfree_skb(skb);
out:
return err;
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 78231c85234d..f62851708d4f 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1678,17 +1678,15 @@ static int slic_init(struct slic_device *sdev)
slic_card_reset(sdev);
err = slic_load_firmware(sdev);
- if (err) {
- dev_err(&sdev->pdev->dev, "failed to load firmware\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&sdev->pdev->dev, err,
+ "failed to load firmware\n");
/* we need the shared memory to read EEPROM so set it up temporarily */
err = slic_init_shmem(sdev);
- if (err) {
- dev_err(&sdev->pdev->dev, "failed to init shared memory\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&sdev->pdev->dev, err,
+ "failed to init shared memory\n");
err = slic_read_eeprom(sdev);
if (err) {
@@ -1741,10 +1739,9 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "failed to enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to enable PCI device\n");
pci_set_master(pdev);
pci_try_set_mwi(pdev);
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 3d8ac63132fb..9e6f91df2ba0 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -1560,9 +1560,9 @@ static void ace_watchdog(struct net_device *data, unsigned int txqueue)
}
-static void ace_tasklet(struct tasklet_struct *t)
+static void ace_bh_work(struct work_struct *work)
{
- struct ace_private *ap = from_tasklet(ap, t, ace_tasklet);
+ struct ace_private *ap = from_work(ap, work, ace_bh_work);
struct net_device *dev = ap->ndev;
int cur_size;
@@ -1595,7 +1595,7 @@ static void ace_tasklet(struct tasklet_struct *t)
#endif
ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
}
- ap->tasklet_pending = 0;
+ ap->bh_work_pending = 0;
}
@@ -1617,7 +1617,7 @@ static void ace_dump_trace(struct ace_private *ap)
*
* Loading rings is safe without holding the spin lock since this is
* done only before the device is enabled, thus no interrupts are
- * generated and by the interrupt handler/tasklet handler.
+ * generated, nor does the interrupt handler/bh work handler run.
*/
static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
{
@@ -2160,7 +2160,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
*/
if (netif_running(dev)) {
int cur_size;
- int run_tasklet = 0;
+ int run_bh_work = 0;
cur_size = atomic_read(&ap->cur_rx_bufs);
if (cur_size < RX_LOW_STD_THRES) {
@@ -2172,7 +2172,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
ace_load_std_rx_ring(dev,
RX_RING_SIZE - cur_size);
} else
- run_tasklet = 1;
+ run_bh_work = 1;
}
if (!ACE_IS_TIGON_I(ap)) {
@@ -2188,7 +2188,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
ace_load_mini_rx_ring(dev,
RX_MINI_SIZE - cur_size);
} else
- run_tasklet = 1;
+ run_bh_work = 1;
}
}
@@ -2205,12 +2205,12 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
ace_load_jumbo_rx_ring(dev,
RX_JUMBO_SIZE - cur_size);
} else
- run_tasklet = 1;
+ run_bh_work = 1;
}
}
- if (run_tasklet && !ap->tasklet_pending) {
- ap->tasklet_pending = 1;
- tasklet_schedule(&ap->ace_tasklet);
+ if (run_bh_work && !ap->bh_work_pending) {
+ ap->bh_work_pending = 1;
+ queue_work(system_bh_wq, &ap->ace_bh_work);
}
}
@@ -2267,7 +2267,7 @@ static int ace_open(struct net_device *dev)
/*
* Setup the bottom half rx ring refill handler
*/
- tasklet_setup(&ap->ace_tasklet, ace_tasklet);
+ INIT_WORK(&ap->ace_bh_work, ace_bh_work);
return 0;
}
@@ -2301,7 +2301,7 @@ static int ace_close(struct net_device *dev)
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
- tasklet_kill(&ap->ace_tasklet);
+ cancel_work_sync(&ap->ace_bh_work);
/*
* Make sure one CPU is not processing packets while
diff --git a/drivers/net/ethernet/alteon/acenic.h b/drivers/net/ethernet/alteon/acenic.h
index ca5ce0cbbad1..0e45a97b9c9b 100644
--- a/drivers/net/ethernet/alteon/acenic.h
+++ b/drivers/net/ethernet/alteon/acenic.h
@@ -2,7 +2,7 @@
#ifndef _ACENIC_H_
#define _ACENIC_H_
#include <linux/interrupt.h>
-
+#include <linux/workqueue.h>
/*
* Generate TX index update each time, when TX ring is closed.
@@ -667,8 +667,8 @@ struct ace_private
struct rx_desc *rx_mini_ring;
struct rx_desc *rx_return_ring;
- int tasklet_pending, jumbo;
- struct tasklet_struct ace_tasklet;
+ int bh_work_pending, jumbo;
+ struct work_struct ace_bh_work;
struct event *evt_ring;
@@ -776,7 +776,7 @@ static int ace_open(struct net_device *dev);
static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int ace_close(struct net_device *dev);
-static void ace_tasklet(struct tasklet_struct *t);
+static void ace_bh_work(struct work_struct *work);
static void ace_dump_trace(struct ace_private *ap);
static void ace_set_multicast_list(struct net_device *dev);
static int ace_change_mtu(struct net_device *dev, int new_mtu);
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 6de0d590be34..9d9fa6559354 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -7,6 +7,21 @@
#define ENA_ADMIN_RSS_KEY_PARTS 10
+#define ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK 0x3F
+#define ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK 0x1F
+
+ /* customer metrics - in correlation with
+ * ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK
+ */
+enum ena_admin_customer_metrics_id {
+ ENA_ADMIN_BW_IN_ALLOWANCE_EXCEEDED = 0,
+ ENA_ADMIN_BW_OUT_ALLOWANCE_EXCEEDED = 1,
+ ENA_ADMIN_PPS_ALLOWANCE_EXCEEDED = 2,
+ ENA_ADMIN_CONNTRACK_ALLOWANCE_EXCEEDED = 3,
+ ENA_ADMIN_LINKLOCAL_ALLOWANCE_EXCEEDED = 4,
+ ENA_ADMIN_CONNTRACK_ALLOWANCE_AVAILABLE = 5,
+};
+
enum ena_admin_aq_opcode {
ENA_ADMIN_CREATE_SQ = 1,
ENA_ADMIN_DESTROY_SQ = 2,
@@ -51,6 +66,9 @@ enum ena_admin_aq_feature_id {
/* device capabilities */
enum ena_admin_aq_caps_id {
ENA_ADMIN_ENI_STATS = 0,
+ /* ENA SRD customer metrics */
+ ENA_ADMIN_ENA_SRD_INFO = 1,
+ ENA_ADMIN_CUSTOMER_METRICS = 2,
};
enum ena_admin_placement_policy_type {
@@ -99,6 +117,9 @@ enum ena_admin_get_stats_type {
ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
/* extra HW stats for specific network interface */
ENA_ADMIN_GET_STATS_TYPE_ENI = 2,
+ /* extra HW stats for ENA SRD */
+ ENA_ADMIN_GET_STATS_TYPE_ENA_SRD = 3,
+ ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS = 4,
};
enum ena_admin_get_stats_scope {
@@ -106,6 +127,16 @@ enum ena_admin_get_stats_scope {
ENA_ADMIN_ETH_TRAFFIC = 1,
};
+/* ENA SRD configuration for ENI */
+enum ena_admin_ena_srd_flags {
+ /* Feature enabled */
+ ENA_ADMIN_ENA_SRD_ENABLED = BIT(0),
+ /* UDP support enabled */
+ ENA_ADMIN_ENA_SRD_UDP_ENABLED = BIT(1),
+ /* Bypass Rx UDP ordering */
+ ENA_ADMIN_ENA_SRD_UDP_ORDERING_BYPASS_ENABLED = BIT(2),
+};
+
struct ena_admin_aq_common_desc {
/* 11:0 : command_id
* 15:12 : reserved12
@@ -363,6 +394,9 @@ struct ena_admin_aq_get_stats_cmd {
* stats of other device
*/
u16 device_id;
+
+ /* a bitmap representing the requested metric values */
+ u64 requested_metrics;
};
/* Basic Statistics Command. */
@@ -419,6 +453,40 @@ struct ena_admin_eni_stats {
u64 linklocal_allowance_exceeded;
};
+struct ena_admin_ena_srd_stats {
+ /* Number of packets transmitted over ENA SRD */
+ u64 ena_srd_tx_pkts;
+
+ /* Number of packets that were transmitted, or could have been
+ * transmitted, over ENA SRD
+ */
+ u64 ena_srd_eligible_tx_pkts;
+
+ /* Number of packets received over ENA SRD */
+ u64 ena_srd_rx_pkts;
+
+ /* Percentage of the ENA SRD resources that is in use */
+ u64 ena_srd_resource_utilization;
+};
+
+/* ENA SRD Statistics Command */
+struct ena_admin_ena_srd_info {
+ /* ENA SRD configuration bitmap. See ena_admin_ena_srd_flags for
+ * details
+ */
+ u64 flags;
+
+ struct ena_admin_ena_srd_stats ena_srd_stats;
+};
+
+/* Customer Metrics Command. */
+struct ena_admin_customer_metrics {
+ /* A bitmap representing the reported customer metrics according to
+ * the order they are reported
+ */
+ u64 reported_metrics;
+};
+
struct ena_admin_acq_get_stats_resp {
struct ena_admin_acq_common_desc acq_common_desc;
@@ -428,6 +496,10 @@ struct ena_admin_acq_get_stats_resp {
struct ena_admin_basic_stats basic_stats;
struct ena_admin_eni_stats eni_stats;
+
+ struct ena_admin_ena_srd_info ena_srd_info;
+
+ struct ena_admin_customer_metrics customer_metrics;
} u;
};
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 713a595370bf..d958cda9e58b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -1881,6 +1881,56 @@ int ena_com_get_link_params(struct ena_com_dev *ena_dev,
return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
}
+static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
+ struct ena_com_stats_ctx *ctx,
+ enum ena_admin_get_stats_type type)
+{
+ struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
+ struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
+ struct ena_com_admin_queue *admin_queue;
+ int ret;
+
+ admin_queue = &ena_dev->admin_queue;
+
+ get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
+ get_cmd->aq_common_descriptor.flags = 0;
+ get_cmd->type = type;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)get_cmd,
+ sizeof(*get_cmd),
+ (struct ena_admin_acq_entry *)get_resp,
+ sizeof(*get_resp));
+
+ if (unlikely(ret))
+ netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
+
+ return ret;
+}
+
+static void ena_com_set_supported_customer_metrics(struct ena_com_dev *ena_dev)
+{
+ struct ena_customer_metrics *customer_metrics;
+ struct ena_com_stats_ctx ctx;
+ int ret;
+
+ customer_metrics = &ena_dev->customer_metrics;
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ customer_metrics->supported_metrics = ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK;
+ return;
+ }
+
+ memset(&ctx, 0x0, sizeof(ctx));
+ ctx.get_cmd.requested_metrics = ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK;
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
+ if (likely(ret == 0))
+ customer_metrics->supported_metrics =
+ ctx.get_resp.u.customer_metrics.reported_metrics;
+ else
+ netdev_err(ena_dev->net_device,
+ "Failed to query customer metrics support. error: %d\n", ret);
+}
+
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
@@ -1960,6 +2010,8 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
else
return rc;
+ ena_com_set_supported_customer_metrics(ena_dev);
+
return 0;
}
@@ -2104,50 +2156,44 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
return 0;
}
-static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
- struct ena_com_stats_ctx *ctx,
- enum ena_admin_get_stats_type type)
+int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_eni_stats *stats)
{
- struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
- struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
- struct ena_com_admin_queue *admin_queue;
+ struct ena_com_stats_ctx ctx;
int ret;
- admin_queue = &ena_dev->admin_queue;
-
- get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
- get_cmd->aq_common_descriptor.flags = 0;
- get_cmd->type = type;
-
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)get_cmd,
- sizeof(*get_cmd),
- (struct ena_admin_acq_entry *)get_resp,
- sizeof(*get_resp));
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
+ netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
+ ENA_ADMIN_ENI_STATS);
+ return -EOPNOTSUPP;
+ }
- if (unlikely(ret))
- netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
+ if (likely(ret == 0))
+ memcpy(stats, &ctx.get_resp.u.eni_stats,
+ sizeof(ctx.get_resp.u.eni_stats));
return ret;
}
-int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_eni_stats *stats)
+int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
+ struct ena_admin_ena_srd_info *info)
{
struct ena_com_stats_ctx ctx;
int ret;
- if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENA_SRD_INFO)) {
netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
- ENA_ADMIN_ENI_STATS);
+ ENA_ADMIN_ENA_SRD_INFO);
return -EOPNOTSUPP;
}
memset(&ctx, 0x0, sizeof(ctx));
- ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENA_SRD);
if (likely(ret == 0))
- memcpy(stats, &ctx.get_resp.u.eni_stats,
- sizeof(ctx.get_resp.u.eni_stats));
+ memcpy(info, &ctx.get_resp.u.ena_srd_info,
+ sizeof(ctx.get_resp.u.ena_srd_info));
return ret;
}
@@ -2167,6 +2213,50 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
return ret;
}
+int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len)
+{
+ struct ena_admin_aq_get_stats_cmd *get_cmd;
+ struct ena_com_stats_ctx ctx;
+ int ret;
+
+ if (unlikely(len > ena_dev->customer_metrics.buffer_len)) {
+ netdev_err(ena_dev->net_device,
+ "Invalid buffer size %u. The given buffer is too big.\n", len);
+ return -EINVAL;
+ }
+
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ netdev_err(ena_dev->net_device, "Capability %d not supported.\n",
+ ENA_ADMIN_CUSTOMER_METRICS);
+ return -EOPNOTSUPP;
+ }
+
+ if (!ena_dev->customer_metrics.supported_metrics) {
+ netdev_err(ena_dev->net_device, "No supported customer metrics.\n");
+ return -EOPNOTSUPP;
+ }
+
+ get_cmd = &ctx.get_cmd;
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_com_mem_addr_set(ena_dev,
+ &get_cmd->u.control_buffer.address,
+ ena_dev->customer_metrics.buffer_dma_addr);
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device, "Memory address set failed.\n");
+ return ret;
+ }
+
+ get_cmd->u.control_buffer.length = ena_dev->customer_metrics.buffer_len;
+ get_cmd->requested_metrics = ena_dev->customer_metrics.supported_metrics;
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
+ if (likely(ret == 0))
+ memcpy(buffer, ena_dev->customer_metrics.buffer_virt_addr, len);
+ else
+ netdev_err(ena_dev->net_device, "Failed to get customer metrics. error: %d\n", ret);
+
+ return ret;
+}
+
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
{
struct ena_com_admin_queue *admin_queue;
@@ -2706,6 +2796,24 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
return 0;
}
+int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev)
+{
+ struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
+
+ customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE;
+ customer_metrics->buffer_virt_addr = NULL;
+
+ customer_metrics->buffer_virt_addr =
+ dma_alloc_coherent(ena_dev->dmadev, customer_metrics->buffer_len,
+ &customer_metrics->buffer_dma_addr, GFP_KERNEL);
+ if (!customer_metrics->buffer_virt_addr) {
+ customer_metrics->buffer_len = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
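/* Illustrative call pattern (not from this patch), assuming the driver
 * allocates the metrics buffer once during device init and frees it on
 * teardown:
 *
 *	rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
 *	if (rc)
 *		goto err_out;
 *	...
 *	ena_com_delete_customer_metrics_buffer(ena_dev);
 */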
void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
@@ -2728,6 +2836,19 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
}
}
+void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev)
+{
+ struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
+
+ if (customer_metrics->buffer_virt_addr) {
+ dma_free_coherent(ena_dev->dmadev, customer_metrics->buffer_len,
+ customer_metrics->buffer_virt_addr,
+ customer_metrics->buffer_dma_addr);
+ customer_metrics->buffer_virt_addr = NULL;
+ customer_metrics->buffer_len = 0;
+ }
+}
+
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 924f03f5a6c7..a372c5e768a7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -42,6 +42,8 @@
#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry))
#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry))
+#define ENA_CUSTOMER_METRICS_BUFFER_SIZE 512
+
/*****************************************************************************/
/*****************************************************************************/
/* ENA adaptive interrupt moderation settings */
@@ -278,6 +280,16 @@ struct ena_rss {
};
+struct ena_customer_metrics {
+ /* in correlation with ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK
+ * and ena_admin_customer_metrics_id
+ */
+ u64 supported_metrics;
+ dma_addr_t buffer_dma_addr;
+ void *buffer_virt_addr;
+ u32 buffer_len;
+};
+
struct ena_host_attribute {
/* Debug area */
u8 *debug_area_virt_addr;
@@ -327,6 +339,8 @@ struct ena_com_dev {
struct ena_intr_moder_entry *intr_moder_tbl;
struct ena_com_llq_info llq_info;
+
+ struct ena_customer_metrics customer_metrics;
};
struct ena_com_dev_get_features_ctx {
@@ -595,6 +609,24 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
struct ena_admin_eni_stats *stats);
+/* ena_com_get_ena_srd_info - Get ENA SRD network interface statistics
+ * @ena_dev: ENA communication layer struct
+ * @info: ena srd stats and flags
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
+ struct ena_admin_ena_srd_info *info);
+
+/* ena_com_get_customer_metrics - Get customer metrics for network interface
+ * @ena_dev: ENA communication layer struct
+ * @buffer: buffer for returned customer metrics
+ * @len: size of the buffer
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len);
+
/* ena_com_set_dev_mtu - Configure the device mtu.
* @ena_dev: ENA communication layer struct
* @mtu: mtu value
@@ -805,6 +837,13 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
u32 debug_area_size);
+/* ena_com_allocate_customer_metrics_buffer - Allocate customer metrics resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev);
+
/* ena_com_delete_debug_area - Free the debug area resources.
* @ena_dev: ENA communication layer struct
*
@@ -819,6 +858,13 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
*/
void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
+/* ena_com_delete_customer_metrics_buffer - Free the customer metrics resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocated customer metrics area.
+ */
+void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev);
+
/* ena_com_set_host_attributes - Update the device with the host
* attributes (debug area and host info) base address.
* @ena_dev: ENA communication layer struct
@@ -975,6 +1021,28 @@ static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev,
return !!(ena_dev->capabilities & BIT(cap_id));
}
+/* ena_com_get_customer_metric_support - query whether device supports a given customer metric.
+ * @ena_dev: ENA communication layer struct
+ * @metric_id: enum value representing the customer metric
+ *
+ * @return - true if customer metric is supported or false otherwise
+ */
+static inline bool ena_com_get_customer_metric_support(struct ena_com_dev *ena_dev,
+ enum ena_admin_customer_metrics_id metric_id)
+{
+ return !!(ena_dev->customer_metrics.supported_metrics & BIT(metric_id));
+}
+
+/* ena_com_get_customer_metric_count - return the number of supported customer metrics.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - the number of supported customer metrics
+ */
+static inline int ena_com_get_customer_metric_count(struct ena_com_dev *ena_dev)
+{
+ return hweight64(ena_dev->customer_metrics.supported_metrics);
+}
+
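/* Usage sketch (illustrative helper, not in the patch): walking the
 * supported-metrics bitmap, assuming metric IDs are contiguous from 0 as
 * laid out in enum ena_admin_customer_metrics_id:
 */
static inline void ena_com_list_supported_metrics(struct ena_com_dev *ena_dev)
{
	int id;

	for (id = 0; id <= ENA_ADMIN_CONNTRACK_ALLOWANCE_AVAILABLE; id++)
		if (ena_com_get_customer_metric_support(ena_dev, id))
			netdev_dbg(ena_dev->net_device,
				   "customer metric %d supported\n", id);
}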
/* ena_com_update_intr_reg - Prepare interrupt register
* @intr_reg: interrupt register to update.
* @rx_delay_interval: Rx interval in usecs
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index b24cc3f05248..60fb35ec4b15 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -14,6 +14,10 @@ struct ena_stats {
int stat_offset;
};
+struct ena_hw_metrics {
+ char name[ETH_GSTRING_LEN];
+};
+
#define ENA_STAT_ENA_COM_ENTRY(stat) { \
.name = #stat, \
.stat_offset = offsetof(struct ena_com_stats_admin, stat) / sizeof(u64) \
@@ -41,6 +45,18 @@ struct ena_stats {
#define ENA_STAT_ENI_ENTRY(stat) \
ENA_STAT_HW_ENTRY(stat, eni_stats)
+#define ENA_STAT_ENA_SRD_ENTRY(stat) \
+ ENA_STAT_HW_ENTRY(stat, ena_srd_stats)
+
+#define ENA_STAT_ENA_SRD_MODE_ENTRY(stat) { \
+ .name = #stat, \
+ .stat_offset = offsetof(struct ena_admin_ena_srd_info, flags) / sizeof(u64) \
+}
+
+#define ENA_METRIC_ENI_ENTRY(stat) { \
+ .name = #stat \
+}
+
static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(tx_timeout),
ENA_STAT_GLOBAL_ENTRY(suspend),
@@ -52,6 +68,9 @@ static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(reset_fail),
};
+/* A partial list of HW stats. Used when the admin command
+ * with type ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS is not supported
+ */
static const struct ena_stats ena_stats_eni_strings[] = {
ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
@@ -60,6 +79,23 @@ static const struct ena_stats ena_stats_eni_strings[] = {
ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
};
+static const struct ena_hw_metrics ena_hw_stats_strings[] = {
+ ENA_METRIC_ENI_ENTRY(bw_in_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(bw_out_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(pps_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(conntrack_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(linklocal_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(conntrack_allowance_available),
+};
+
+static const struct ena_stats ena_srd_info_strings[] = {
+ ENA_STAT_ENA_SRD_MODE_ENTRY(ena_srd_mode),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_tx_pkts),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_eligible_tx_pkts),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_rx_pkts),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_resource_utilization)
+};
+
static const struct ena_stats ena_stats_tx_strings[] = {
ENA_STAT_TX_ENTRY(cnt),
ENA_STAT_TX_ENTRY(bytes),
@@ -112,7 +148,9 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
-#define ENA_STATS_ARRAY_ENI(adapter) ARRAY_SIZE(ena_stats_eni_strings)
+#define ENA_STATS_ARRAY_ENI ARRAY_SIZE(ena_stats_eni_strings)
+#define ENA_STATS_ARRAY_ENA_SRD ARRAY_SIZE(ena_srd_info_strings)
+#define ENA_METRICS_ARRAY_ENI ARRAY_SIZE(ena_hw_stats_strings)
static void ena_safe_update_stat(u64 *src, u64 *dst,
struct u64_stats_sync *syncp)
@@ -125,6 +163,57 @@ static void ena_safe_update_stat(u64 *src, u64 *dst,
} while (u64_stats_fetch_retry(syncp, start));
}
+static void ena_metrics_stats(struct ena_adapter *adapter, u64 **data)
+{
+ struct ena_com_dev *dev = adapter->ena_dev;
+ const struct ena_stats *ena_stats;
+ u64 *ptr;
+ int i;
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ u32 supported_metrics_count;
+ int len;
+
+ supported_metrics_count = ena_com_get_customer_metric_count(dev);
+ len = supported_metrics_count * sizeof(u64);
+
+ /* Fill the data buffer, and advance its pointer */
+ ena_com_get_customer_metrics(dev, (char *)(*data), len);
+ (*data) += supported_metrics_count;
+
+ } else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)) {
+ ena_com_get_eni_stats(dev, &adapter->eni_stats);
+ /* Update regardless of rc - once we have told ethtool how many
+ * stats we have, it will print that many. We can't leave holes in the stats
+ */
+ for (i = 0; i < ENA_STATS_ARRAY_ENI; i++) {
+ ena_stats = &ena_stats_eni_strings[i];
+
+ ptr = (u64 *)&adapter->eni_stats +
+ ena_stats->stat_offset;
+
+ ena_safe_update_stat(ptr, (*data)++, &adapter->syncp);
+ }
+ }
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO)) {
+ ena_com_get_ena_srd_info(dev, &adapter->ena_srd_info);
+ /* Get ENA SRD mode */
+ ptr = (u64 *)&adapter->ena_srd_info;
+ ena_safe_update_stat(ptr, (*data)++, &adapter->syncp);
+ for (i = 1; i < ENA_STATS_ARRAY_ENA_SRD; i++) {
+ ena_stats = &ena_srd_info_strings[i];
+/* Wrapped within an outer struct - add one to the offset to skip
+ * the ENA SRD mode field that was already processed above.
+ */
+ ptr = (u64 *)&adapter->ena_srd_info +
+ ena_stats->stat_offset + 1;
+
+ ena_safe_update_stat(ptr, (*data)++, &adapter->syncp);
+ }
+ }
+}
+
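The pointer arithmetic above treats each stats struct as a flat sequence of u64 words, so (u64 *)&struct + stat_offset lands on the requested counter; for the SRD block the first word is the mode, hence the extra + 1. A minimal standalone sketch of the same indexing, with hypothetical layouts standing in for the ena_admin definitions:

/* Sketch only: hypothetical layouts mirroring the driver's assumption
 * that the stats structs are flat sequences of u64 words.
 */
#include <stdint.h>
#include <stdio.h>

struct srd_stats {            /* stands in for ena_admin_ena_srd_stats */
	uint64_t tx_pkts;
	uint64_t eligible_tx_pkts;
	uint64_t rx_pkts;
	uint64_t resource_utilization;
};

struct srd_info {             /* stands in for ena_admin_ena_srd_info */
	uint64_t mode;        /* handled separately, hence the +1 */
	struct srd_stats stats;
};

int main(void)
{
	struct srd_info info = { .mode = 1, .stats = { 10, 20, 30, 40 } };
	int stat_offset = 2;  /* e.g. rx_pkts within srd_stats */

	/* same arithmetic as ena_metrics_stats(): base + offset + 1 */
	uint64_t *ptr = (uint64_t *)&info + stat_offset + 1;
	printf("%llu\n", (unsigned long long)*ptr);  /* prints 30 */
	return 0;
}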
static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
{
const struct ena_stats *ena_stats;
@@ -179,7 +268,7 @@ static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
static void ena_get_stats(struct ena_adapter *adapter,
u64 *data,
- bool eni_stats_needed)
+ bool hw_stats_needed)
{
const struct ena_stats *ena_stats;
u64 *ptr;
@@ -193,17 +282,8 @@ static void ena_get_stats(struct ena_adapter *adapter,
ena_safe_update_stat(ptr, data++, &adapter->syncp);
}
- if (eni_stats_needed) {
- ena_update_hw_stats(adapter);
- for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
- ena_stats = &ena_stats_eni_strings[i];
-
- ptr = (u64 *)&adapter->eni_stats +
- ena_stats->stat_offset;
-
- ena_safe_update_stat(ptr, data++, &adapter->syncp);
- }
- }
+ if (hw_stats_needed)
+ ena_metrics_stats(adapter, &data);
ena_queue_stats(adapter, &data);
ena_dev_admin_queue_stats(adapter, &data);
@@ -214,9 +294,8 @@ static void ena_get_ethtool_stats(struct net_device *netdev,
u64 *data)
{
struct ena_adapter *adapter = netdev_priv(netdev);
- struct ena_com_dev *dev = adapter->ena_dev;
- ena_get_stats(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS));
+ ena_get_stats(adapter, data, true);
}
static int ena_get_sw_stats_count(struct ena_adapter *adapter)
@@ -228,9 +307,17 @@ static int ena_get_sw_stats_count(struct ena_adapter *adapter)
static int ena_get_hw_stats_count(struct ena_adapter *adapter)
{
- bool supported = ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS);
+ struct ena_com_dev *dev = adapter->ena_dev;
+ int count;
+
+ count = ENA_STATS_ARRAY_ENA_SRD * ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO);
- return ENA_STATS_ARRAY_ENI(adapter) * supported;
+ if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS))
+ count += ena_com_get_customer_metric_count(dev);
+ else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS))
+ count += ENA_STATS_ARRAY_ENI;
+
+ return count;
}
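Since ena_com_get_cap() evaluates to 0 or 1, the SRD term above can be a plain multiply, and the customer-metrics count takes precedence over the legacy ENI array so the two are never counted together. A hedged sketch of the same arithmetic, with the helpers reduced to booleans:

/* Sketch of the count logic; the names below are stand-ins, not the
 * driver's API. The ENA_STATS_ARRAY_* values are the ARRAY_SIZE()s.
 */
static int hw_stats_count(bool has_srd, bool has_customer_metrics,
			  int customer_metric_count, bool has_eni)
{
	int count = has_srd ? ENA_STATS_ARRAY_ENA_SRD : 0;

	if (has_customer_metrics)
		count += customer_metric_count;
	else if (has_eni)
		count += ENA_STATS_ARRAY_ENI;

	return count;
}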
int ena_get_sset_count(struct net_device *netdev, int sset)
@@ -246,6 +333,35 @@ int ena_get_sset_count(struct net_device *netdev, int sset)
return -EOPNOTSUPP;
}
+static void ena_metrics_stats_strings(struct ena_adapter *adapter, u8 **data)
+{
+ struct ena_com_dev *dev = adapter->ena_dev;
+ const struct ena_hw_metrics *ena_metrics;
+ const struct ena_stats *ena_stats;
+ int i;
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ for (i = 0; i < ENA_METRICS_ARRAY_ENI; i++) {
+ if (ena_com_get_customer_metric_support(dev, i)) {
+ ena_metrics = &ena_hw_stats_strings[i];
+ ethtool_puts(data, ena_metrics->name);
+ }
+ }
+ } else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)) {
+ for (i = 0; i < ENA_STATS_ARRAY_ENI; i++) {
+ ena_stats = &ena_stats_eni_strings[i];
+ ethtool_puts(data, ena_stats->name);
+ }
+ }
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO)) {
+ for (i = 0; i < ENA_STATS_ARRAY_ENA_SRD; i++) {
+ ena_stats = &ena_srd_info_strings[i];
+ ethtool_puts(data, ena_stats->name);
+ }
+ }
+}
+
static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
{
const struct ena_stats *ena_stats;
@@ -291,7 +407,7 @@ static void ena_com_dev_strings(u8 **data)
static void ena_get_strings(struct ena_adapter *adapter,
u8 *data,
- bool eni_stats_needed)
+ bool hw_stats_needed)
{
const struct ena_stats *ena_stats;
int i;
@@ -301,12 +417,8 @@ static void ena_get_strings(struct ena_adapter *adapter,
ethtool_puts(&data, ena_stats->name);
}
- if (eni_stats_needed) {
- for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
- ena_stats = &ena_stats_eni_strings[i];
- ethtool_puts(&data, ena_stats->name);
- }
- }
+ if (hw_stats_needed)
+ ena_metrics_stats_strings(adapter, &data);
ena_queue_strings(adapter, &data);
ena_com_dev_strings(&data);
@@ -317,11 +429,10 @@ static void ena_get_ethtool_strings(struct net_device *netdev,
u8 *data)
{
struct ena_adapter *adapter = netdev_priv(netdev);
- struct ena_com_dev *dev = adapter->ena_dev;
switch (sset) {
case ETH_SS_STATS:
- ena_get_strings(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS));
+ ena_get_strings(adapter, data, true);
break;
}
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 184b6e6cbed4..c5b50cfa935a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2798,19 +2798,6 @@ err:
ena_com_delete_debug_area(adapter->ena_dev);
}
-int ena_update_hw_stats(struct ena_adapter *adapter)
-{
- int rc;
-
- rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
- if (rc) {
- netdev_err(adapter->netdev, "Failed to get ENI stats\n");
- return rc;
- }
-
- return 0;
-}
-
static void ena_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -3944,10 +3931,16 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, adapter);
+ rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
+ if (rc) {
+ netdev_err(netdev, "ena_com_allocate_customer_metrics_buffer failed\n");
+ goto err_netdev_destroy;
+ }
+
rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
if (rc) {
dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n");
- goto err_netdev_destroy;
+ goto err_metrics_destroy;
}
rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state);
@@ -3955,7 +3948,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "ENA device init failed\n");
if (rc == -ETIME)
rc = -EPROBE_DEFER;
- goto err_netdev_destroy;
+ goto err_metrics_destroy;
}
/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
@@ -4076,6 +4069,8 @@ err_worker_destroy:
err_device_destroy:
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
+err_metrics_destroy:
+ ena_com_delete_customer_metrics_buffer(ena_dev);
err_netdev_destroy:
free_netdev(netdev);
err_free_region:
@@ -4139,6 +4134,8 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
ena_com_delete_host_info(ena_dev);
+ ena_com_delete_customer_metrics_buffer(ena_dev);
+
ena_release_bars(ena_dev, pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index d59509747d1a..6e12ae3b12e5 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -373,6 +373,7 @@ struct ena_adapter {
struct u64_stats_sync syncp;
struct ena_stats_dev dev_stats;
struct ena_admin_eni_stats eni_stats;
+ struct ena_admin_ena_srd_info ena_srd_info;
/* last queue index that was checked for uncompleted tx packets */
u32 last_monitored_tx_qid;
@@ -390,7 +391,6 @@ void ena_dump_stats_to_dmesg(struct ena_adapter *adapter);
void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
-int ena_update_hw_stats(struct ena_adapter *adapter);
int ena_update_queue_params(struct ena_adapter *adapter,
u32 new_tx_size,
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index c156566c0906..f19b04b92fa9 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -105,10 +105,6 @@ static struct net_device * __init mvme147lance_probe(void)
macaddr[3] = address&0xff;
eth_hw_addr_set(dev, macaddr);
- printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
- dev->name, dev->base_addr, MVME147_LANCE_IRQ,
- dev->dev_addr);
-
lp = netdev_priv(dev);
lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 32K */
if (!lp->ram) {
@@ -138,6 +134,9 @@ static struct net_device * __init mvme147lance_probe(void)
return ERR_PTR(err);
}
+ netdev_info(dev, "MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
+ dev->base_addr, MVME147_LANCE_IRQ, dev->dev_addr);
+
return dev;
}
diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c
index 6bdd02b7aa6d..ac37a4e738ae 100644
--- a/drivers/net/ethernet/amd/pds_core/debugfs.c
+++ b/drivers/net/ethernet/amd/pds_core/debugfs.c
@@ -112,7 +112,7 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
struct pdsc_cq *cq = &qcq->cq;
qcq_dentry = debugfs_create_dir(q->name, pdsc->dentry);
- if (IS_ERR_OR_NULL(qcq_dentry))
+ if (IS_ERR(qcq_dentry))
return;
qcq->dentry = qcq_dentry;
@@ -123,7 +123,7 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
debugfs_create_x32("accum_work", 0400, qcq_dentry, &qcq->accum_work);
q_dentry = debugfs_create_dir("q", qcq->dentry);
- if (IS_ERR_OR_NULL(q_dentry))
+ if (IS_ERR(q_dentry))
return;
debugfs_create_u32("index", 0400, q_dentry, &q->index);
@@ -135,7 +135,7 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
debugfs_create_u16("head", 0400, q_dentry, &q->head_idx);
cq_dentry = debugfs_create_dir("cq", qcq->dentry);
- if (IS_ERR_OR_NULL(cq_dentry))
+ if (IS_ERR(cq_dentry))
return;
debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa);
@@ -148,7 +148,7 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
struct pdsc_intr_info *intr = &pdsc->intr_info[qcq->intx];
intr_dentry = debugfs_create_dir("intr", qcq->dentry);
- if (IS_ERR_OR_NULL(intr_dentry))
+ if (IS_ERR(intr_dentry))
return;
debugfs_create_u32("index", 0400, intr_dentry, &intr->index);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index c4a4e316683f..5475867708f4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -403,9 +403,9 @@ static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
return false;
}
-static void xgbe_ecc_isr_task(struct tasklet_struct *t)
+static void xgbe_ecc_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_ecc);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, ecc_bh_work);
unsigned int ecc_isr;
bool stop = false;
@@ -465,17 +465,17 @@ static irqreturn_t xgbe_ecc_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_ecc);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->ecc_bh_work);
else
- xgbe_ecc_isr_task(&pdata->tasklet_ecc);
+ xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work);
return IRQ_HANDLED;
}
-static void xgbe_isr_task(struct tasklet_struct *t)
+static void xgbe_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_dev);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, dev_bh_work);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_channel *channel;
unsigned int dma_isr, dma_ch_isr;
@@ -582,7 +582,7 @@ isr_done:
/* If there is not a separate ECC irq, handle it here */
if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
- xgbe_ecc_isr_task(&pdata->tasklet_ecc);
+ xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work);
/* If there is not a separate I2C irq, handle it here */
if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
@@ -604,10 +604,10 @@ static irqreturn_t xgbe_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_dev);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->dev_bh_work);
else
- xgbe_isr_task(&pdata->tasklet_dev);
+ xgbe_isr_bh_work(&pdata->dev_bh_work);
return IRQ_HANDLED;
}
@@ -1007,8 +1007,8 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
unsigned int i;
int ret;
- tasklet_setup(&pdata->tasklet_dev, xgbe_isr_task);
- tasklet_setup(&pdata->tasklet_ecc, xgbe_ecc_isr_task);
+ INIT_WORK(&pdata->dev_bh_work, xgbe_isr_bh_work);
+ INIT_WORK(&pdata->ecc_bh_work, xgbe_ecc_isr_bh_work);
ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
netdev_name(netdev), pdata);
@@ -1078,8 +1078,8 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
- tasklet_kill(&pdata->tasklet_dev);
- tasklet_kill(&pdata->tasklet_ecc);
+ cancel_work_sync(&pdata->dev_bh_work);
+ cancel_work_sync(&pdata->ecc_bh_work);
if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 21407a26f806..5fc94c2f638e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -582,16 +582,12 @@ static int xgbe_get_ts_info(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (pdata->ptp_clock)
ts_info->phc_index = ptp_clock_index(pdata->ptp_clock);
- else
- ts_info->phc_index = -1;
ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index a9ccc4258ee5..7a833894f52a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -274,9 +274,9 @@ static void xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata,
XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
}
-static void xgbe_i2c_isr_task(struct tasklet_struct *t)
+static void xgbe_i2c_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_i2c);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, i2c_bh_work);
struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
unsigned int isr;
@@ -321,10 +321,10 @@ static irqreturn_t xgbe_i2c_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_i2c);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->i2c_bh_work);
else
- xgbe_i2c_isr_task(&pdata->tasklet_i2c);
+ xgbe_i2c_isr_bh_work(&pdata->i2c_bh_work);
return IRQ_HANDLED;
}
@@ -369,7 +369,7 @@ static void xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr)
static irqreturn_t xgbe_i2c_combined_isr(struct xgbe_prv_data *pdata)
{
- xgbe_i2c_isr_task(&pdata->tasklet_i2c);
+ xgbe_i2c_isr_bh_work(&pdata->i2c_bh_work);
return IRQ_HANDLED;
}
@@ -449,7 +449,7 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata)
if (pdata->dev_irq != pdata->i2c_irq) {
devm_free_irq(pdata->dev, pdata->i2c_irq, pdata);
- tasklet_kill(&pdata->tasklet_i2c);
+ cancel_work_sync(&pdata->i2c_bh_work);
}
}
@@ -464,7 +464,7 @@ static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
/* If we have a separate I2C irq, enable it */
if (pdata->dev_irq != pdata->i2c_irq) {
- tasklet_setup(&pdata->tasklet_i2c, xgbe_i2c_isr_task);
+ INIT_WORK(&pdata->i2c_bh_work, xgbe_i2c_isr_bh_work);
ret = devm_request_irq(pdata->dev, pdata->i2c_irq,
xgbe_i2c_isr, 0, pdata->i2c_name,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 4a2dc705b528..07f4f3418d01 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -703,9 +703,9 @@ static void xgbe_an73_isr(struct xgbe_prv_data *pdata)
}
}
-static void xgbe_an_isr_task(struct tasklet_struct *t)
+static void xgbe_an_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_an);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, an_bh_work);
netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
@@ -727,17 +727,17 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_an);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->an_bh_work);
else
- xgbe_an_isr_task(&pdata->tasklet_an);
+ xgbe_an_isr_bh_work(&pdata->an_bh_work);
return IRQ_HANDLED;
}
static irqreturn_t xgbe_an_combined_isr(struct xgbe_prv_data *pdata)
{
- xgbe_an_isr_task(&pdata->tasklet_an);
+ xgbe_an_isr_bh_work(&pdata->an_bh_work);
return IRQ_HANDLED;
}
@@ -1454,7 +1454,7 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
if (pdata->dev_irq != pdata->an_irq) {
devm_free_irq(pdata->dev, pdata->an_irq, pdata);
- tasklet_kill(&pdata->tasklet_an);
+ cancel_work_sync(&pdata->an_bh_work);
}
pdata->phy_if.phy_impl.stop(pdata);
@@ -1477,7 +1477,7 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
/* If we have a separate AN irq, enable it */
if (pdata->dev_irq != pdata->an_irq) {
- tasklet_setup(&pdata->tasklet_an, xgbe_an_isr_task);
+ INIT_WORK(&pdata->an_bh_work, xgbe_an_isr_bh_work);
ret = devm_request_irq(pdata->dev, pdata->an_irq,
xgbe_an_isr, 0, pdata->an_name,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index c5e5fac49779..c636999a6a84 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -139,7 +139,7 @@ static int xgbe_config_multi_msi(struct xgbe_prv_data *pdata)
return ret;
}
- pdata->isr_as_tasklet = 1;
+ pdata->isr_as_bh_work = 1;
pdata->irq_count = ret;
pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
@@ -176,7 +176,7 @@ static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
return ret;
}
- pdata->isr_as_tasklet = pdata->pcidev->msi_enabled ? 1 : 0;
+ pdata->isr_as_bh_work = pdata->pcidev->msi_enabled ? 1 : 0;
pdata->irq_count = 1;
pdata->channel_irq_count = 1;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f01a1e566da6..d85386cac8d1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -1298,11 +1298,11 @@ struct xgbe_prv_data {
unsigned int lpm_ctrl; /* CTRL1 for resume */
- unsigned int isr_as_tasklet;
- struct tasklet_struct tasklet_dev;
- struct tasklet_struct tasklet_ecc;
- struct tasklet_struct tasklet_i2c;
- struct tasklet_struct tasklet_an;
+ unsigned int isr_as_bh_work;
+ struct work_struct dev_bh_work;
+ struct work_struct ecc_bh_work;
+ struct work_struct i2c_bh_work;
+ struct work_struct an_bh_work;
struct dentry *xgbe_debugfs;
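The pattern of this conversion, repeated across xgbe-drv.c, xgbe-i2c.c and xgbe-mdio.c above: each tasklet becomes a work_struct queued on system_bh_wq (which still executes in bottom-half context), from_tasklet() becomes from_work(), and tasklet_kill() becomes cancel_work_sync(). A condensed sketch, assuming only the workqueue API:

#include <linux/workqueue.h>

struct example_priv {
	struct work_struct irq_bh_work;	/* was: struct tasklet_struct */
};

static void example_isr_bh_work(struct work_struct *work)
{
	struct example_priv *priv = from_work(priv, work, irq_bh_work);

	/* deferred interrupt processing previously done in the tasklet */
}

/* setup:    INIT_WORK(&priv->irq_bh_work, example_isr_bh_work);
 * from ISR: queue_work(system_bh_wq, &priv->irq_bh_work);
 * teardown: cancel_work_sync(&priv->irq_bh_work);
 */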
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 292b1f9cd9e7..785f4b4ff758 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1317,7 +1317,7 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0);
- ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
+ ret = request_irq(dev->irq, bmac_misc_intr, IRQF_NO_AUTOEN, "BMAC-misc", dev);
if (ret) {
printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
goto err_out_iounmap_rx;
@@ -1336,7 +1336,6 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
/* Mask chip interrupts and disable chip, will be
* re-enabled on open()
*/
- disable_irq(dev->irq);
pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
if (register_netdev(dev) != 0) {
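IRQF_NO_AUTOEN makes request_irq() register the handler with the line left disabled, which removes the window between the old request_irq() and disable_irq() pair where the handler could fire before the hardware is ready; the line is later unmasked with enable_irq(). A minimal sketch of the flag, assuming only the genirq API:

#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_request(int irq, void *dev_id)
{
	/* handler is registered but the line stays masked */
	int ret = request_irq(irq, example_isr, IRQF_NO_AUTOEN,
			      "example", dev_id);
	if (ret)
		return ret;

	/* later, e.g. in open(), once the device can take interrupts */
	enable_irq(irq);
	return 0;
}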
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index d0aecd1d7357..440ff4616fec 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -266,7 +266,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
const int rx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_rx_stat_names);
const int tx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_tx_stat_names);
char tc_string[8];
- int tc;
+ unsigned int tc;
memset(tc_string, 0, sizeof(tc_string));
memcpy(p, aq_ethtool_stat_names,
@@ -275,22 +275,20 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (tc = 0; tc < cfg->tcs; tc++) {
if (cfg->is_qos)
- snprintf(tc_string, 8, "TC%d ", tc);
+ snprintf(tc_string, 8, "TC%u ", tc);
for (i = 0; i < cfg->vecs; i++) {
for (si = 0; si < rx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_rx_stat_names[si],
tc_string,
AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
- p += ETH_GSTRING_LEN;
}
for (si = 0; si < tx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_tx_stat_names[si],
tc_string,
AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
- p += ETH_GSTRING_LEN;
}
}
}
@@ -305,20 +303,18 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (i = 0; i < max(rx_ring_cnt, tx_ring_cnt); i++) {
for (si = 0; si < rx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_rx_stat_names[si],
tc_string,
i ? PTP_HWST_RING_IDX : ptp_ring_idx);
- p += ETH_GSTRING_LEN;
}
if (i >= tx_ring_cnt)
continue;
for (si = 0; si < tx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_tx_stat_names[si],
tc_string,
i ? PTP_HWST_RING_IDX : ptp_ring_idx);
- p += ETH_GSTRING_LEN;
}
}
}
@@ -338,9 +334,8 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (si = 0;
si < ARRAY_SIZE(aq_macsec_txsc_stat_names);
si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_macsec_txsc_stat_names[si], i);
- p += ETH_GSTRING_LEN;
}
aq_txsc = &nic->macsec_cfg->aq_txsc[i];
for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
@@ -349,10 +344,9 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (si = 0;
si < ARRAY_SIZE(aq_macsec_txsa_stat_names);
si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_macsec_txsa_stat_names[si],
i, sa);
- p += ETH_GSTRING_LEN;
}
}
}
@@ -369,10 +363,9 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (si = 0;
si < ARRAY_SIZE(aq_macsec_rxsa_stat_names);
si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_macsec_rxsa_stat_names[si],
i, sa);
- p += ETH_GSTRING_LEN;
}
}
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index f7433abd6591..f21de0c21e52 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -557,7 +557,7 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
}
frag_cnt++;
- next_ = buff_->next,
+ next_ = buff_->next;
buff_ = &self->buff_ring[next_];
is_rsc_completed =
aq_ring_dx_in_range(self->sw_head,
@@ -583,7 +583,7 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
err = -EIO;
goto err_exit;
}
- next_ = buff_->next,
+ next_ = buff_->next;
buff_ = &self->buff_ring[next_];
buff_->is_cleaned = true;
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 482c58c4c584..bec5cdf8d1da 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_ATHEROS
bool "Atheros devices"
default y
- depends on (PCI || ATH79)
+ depends on PCI || ATH79 || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -19,7 +19,7 @@ if NET_VENDOR_ATHEROS
config AG71XX
tristate "Atheros AR7XXX/AR9XXX built-in ethernet mac support"
- depends on ATH79
+ depends on ATH79 || COMPILE_TEST
select PHYLINK
imply NET_SELFTESTS
help
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index a38be924cdaa..9586b6894f7e 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -149,11 +149,11 @@
#define FIFO_CFG4_MC BIT(8) /* Multicast Packet */
#define FIFO_CFG4_BC BIT(9) /* Broadcast Packet */
#define FIFO_CFG4_DR BIT(10) /* Dribble */
-#define FIFO_CFG4_LE BIT(11) /* Long Event */
-#define FIFO_CFG4_CF BIT(12) /* Control Frame */
-#define FIFO_CFG4_PF BIT(13) /* Pause Frame */
-#define FIFO_CFG4_UO BIT(14) /* Unsupported Opcode */
-#define FIFO_CFG4_VT BIT(15) /* VLAN tag detected */
+#define FIFO_CFG4_CF BIT(11) /* Control Frame */
+#define FIFO_CFG4_PF BIT(12) /* Pause Frame */
+#define FIFO_CFG4_UO BIT(13) /* Unsupported Opcode */
+#define FIFO_CFG4_VT BIT(14) /* VLAN tag detected */
+#define FIFO_CFG4_LE BIT(15) /* Long Event */
#define FIFO_CFG4_FT BIT(16) /* Frame Truncated */
#define FIFO_CFG4_UC BIT(17) /* Unicast Packet */
#define FIFO_CFG4_INIT (FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
@@ -168,28 +168,28 @@
#define FIFO_CFG5_DV BIT(1) /* RX_DV Event */
#define FIFO_CFG5_FC BIT(2) /* False Carrier */
#define FIFO_CFG5_CE BIT(3) /* Code Error */
-#define FIFO_CFG5_LM BIT(4) /* Length Mismatch */
-#define FIFO_CFG5_LO BIT(5) /* Length Out of Range */
-#define FIFO_CFG5_OK BIT(6) /* Packet is OK */
-#define FIFO_CFG5_MC BIT(7) /* Multicast Packet */
-#define FIFO_CFG5_BC BIT(8) /* Broadcast Packet */
-#define FIFO_CFG5_DR BIT(9) /* Dribble */
-#define FIFO_CFG5_CF BIT(10) /* Control Frame */
-#define FIFO_CFG5_PF BIT(11) /* Pause Frame */
-#define FIFO_CFG5_UO BIT(12) /* Unsupported Opcode */
-#define FIFO_CFG5_VT BIT(13) /* VLAN tag detected */
-#define FIFO_CFG5_LE BIT(14) /* Long Event */
-#define FIFO_CFG5_FT BIT(15) /* Frame Truncated */
-#define FIFO_CFG5_16 BIT(16) /* unknown */
-#define FIFO_CFG5_17 BIT(17) /* unknown */
+#define FIFO_CFG5_CR BIT(4) /* CRC error */
+#define FIFO_CFG5_LM BIT(5) /* Length Mismatch */
+#define FIFO_CFG5_LO BIT(6) /* Length Out of Range */
+#define FIFO_CFG5_OK BIT(7) /* Packet is OK */
+#define FIFO_CFG5_MC BIT(8) /* Multicast Packet */
+#define FIFO_CFG5_BC BIT(9) /* Broadcast Packet */
+#define FIFO_CFG5_DR BIT(10) /* Dribble */
+#define FIFO_CFG5_CF BIT(11) /* Control Frame */
+#define FIFO_CFG5_PF BIT(12) /* Pause Frame */
+#define FIFO_CFG5_UO BIT(13) /* Unsupported Opcode */
+#define FIFO_CFG5_VT BIT(14) /* VLAN tag detected */
+#define FIFO_CFG5_LE BIT(15) /* Long Event */
+#define FIFO_CFG5_FT BIT(16) /* Frame Truncated */
+#define FIFO_CFG5_UC BIT(17) /* Unicast Packet */
#define FIFO_CFG5_SF BIT(18) /* Short Frame */
#define FIFO_CFG5_BM BIT(19) /* Byte Mode */
#define FIFO_CFG5_INIT (FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
- FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
- FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
- FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
- FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
- FIFO_CFG5_17 | FIFO_CFG5_SF)
+ FIFO_CFG5_CE | FIFO_CFG5_LM | FIFO_CFG5_LO | \
+ FIFO_CFG5_OK | FIFO_CFG5_MC | FIFO_CFG5_BC | \
+ FIFO_CFG5_DR | FIFO_CFG5_CF | FIFO_CFG5_UO | \
+ FIFO_CFG5_VT | FIFO_CFG5_LE | FIFO_CFG5_FT | \
+ FIFO_CFG5_UC | FIFO_CFG5_SF)
#define AG71XX_REG_TX_CTRL 0x0180
#define TX_CTRL_TXE BIT(0) /* Tx Enable */
@@ -379,10 +379,7 @@ struct ag71xx {
u32 fifodata[3];
int mac_idx;
- struct reset_control *mdio_reset;
- struct mii_bus *mii_bus;
struct clk *clk_mdio;
- struct clk *clk_eth;
};
static int ag71xx_desc_empty(struct ag71xx_desc *desc)
@@ -447,6 +444,13 @@ static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
}
+static int ag71xx_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ return phylink_mii_ioctl(ag->phylink, ifr, cmd);
+}
+
static void ag71xx_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
@@ -504,8 +508,7 @@ static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
switch (sset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- ag71xx_statistics[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, ag71xx_statistics[i].name);
break;
case ETH_SS_TEST:
net_selftest_get_strings(data);
@@ -685,36 +688,27 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
{
struct device *dev = &ag->pdev->dev;
struct net_device *ndev = ag->ndev;
+ struct reset_control *mdio_reset;
static struct mii_bus *mii_bus;
struct device_node *np, *mnp;
int err;
np = dev->of_node;
- ag->mii_bus = NULL;
- ag->clk_mdio = devm_clk_get(dev, "mdio");
+ ag->clk_mdio = devm_clk_get_enabled(dev, "mdio");
if (IS_ERR(ag->clk_mdio)) {
netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
return PTR_ERR(ag->clk_mdio);
}
- err = clk_prepare_enable(ag->clk_mdio);
- if (err) {
- netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
- return err;
- }
-
mii_bus = devm_mdiobus_alloc(dev);
- if (!mii_bus) {
- err = -ENOMEM;
- goto mdio_err_put_clk;
- }
+ if (!mii_bus)
+ return -ENOMEM;
- ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
- if (IS_ERR(ag->mdio_reset)) {
+ mdio_reset = devm_reset_control_get_exclusive(dev, "mdio");
+ if (IS_ERR(mdio_reset)) {
netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
- err = PTR_ERR(ag->mdio_reset);
- goto mdio_err_put_clk;
+ return PTR_ERR(mdio_reset);
}
mii_bus->name = "ag71xx_mdio";
@@ -725,33 +719,18 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
mii_bus->parent = dev;
snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);
- if (!IS_ERR(ag->mdio_reset)) {
- reset_control_assert(ag->mdio_reset);
- msleep(100);
- reset_control_deassert(ag->mdio_reset);
- msleep(200);
- }
+ reset_control_assert(mdio_reset);
+ msleep(100);
+ reset_control_deassert(mdio_reset);
+ msleep(200);
mnp = of_get_child_by_name(np, "mdio");
- err = of_mdiobus_register(mii_bus, mnp);
+ err = devm_of_mdiobus_register(dev, mii_bus, mnp);
of_node_put(mnp);
if (err)
- goto mdio_err_put_clk;
-
- ag->mii_bus = mii_bus;
+ return err;
return 0;
-
-mdio_err_put_clk:
- clk_disable_unprepare(ag->clk_mdio);
- return err;
-}
-
-static void ag71xx_mdio_remove(struct ag71xx *ag)
-{
- if (ag->mii_bus)
- mdiobus_unregister(ag->mii_bus);
- clk_disable_unprepare(ag->clk_mdio);
}
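The mdio_probe() rework above is a straight devm conversion: devm_clk_get_enabled() folds get + prepare_enable + the error/remove-path disable into one managed call, and devm_of_mdiobus_register() unregisters the bus on unbind, which is what lets ag71xx_mdio_remove() be deleted outright. A reduced sketch of the resulting shape (bus field setup elided):

#include <linux/clk.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static int example_mdio_probe(struct device *dev, struct device_node *mdio_np)
{
	struct mii_bus *bus;
	struct clk *clk;

	/* enabled now, disabled and put automatically on unbind */
	clk = devm_clk_get_enabled(dev, "mdio");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	bus = devm_mdiobus_alloc(dev);
	if (!bus)
		return -ENOMEM;

	/* name/id/read/write setup elided; unregistered on unbind */
	return devm_of_mdiobus_register(dev, bus, mdio_np);
}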
static void ag71xx_hw_stop(struct ag71xx *ag)
@@ -1637,7 +1616,6 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
unsigned int i = ring->curr & ring_mask;
struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
int pktlen;
- int err = 0;
if (ag71xx_desc_empty(desc))
break;
@@ -1660,6 +1638,7 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
if (!skb) {
+ ndev->stats.rx_errors++;
skb_free_frag(ring->buf[i].rx.rx_buf);
goto next;
}
@@ -1667,14 +1646,9 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
skb_reserve(skb, offset);
skb_put(skb, pktlen);
- if (err) {
- ndev->stats.rx_dropped++;
- kfree_skb(skb);
- } else {
- skb->dev = ndev;
- skb->ip_summed = CHECKSUM_NONE;
- list_add_tail(&skb->list, &rx_list);
- }
+ skb->dev = ndev;
+ skb->ip_summed = CHECKSUM_NONE;
+ list_add_tail(&skb->list, &rx_list);
next:
ring->buf[i].rx.rx_buf = NULL;
@@ -1799,7 +1773,7 @@ static const struct net_device_ops ag71xx_netdev_ops = {
.ndo_open = ag71xx_open,
.ndo_stop = ag71xx_stop,
.ndo_start_xmit = ag71xx_hard_start_xmit,
- .ndo_eth_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = ag71xx_do_ioctl,
.ndo_tx_timeout = ag71xx_tx_timeout,
.ndo_change_mtu = ag71xx_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
@@ -1816,6 +1790,7 @@ static int ag71xx_probe(struct platform_device *pdev)
const struct ag71xx_dcfg *dcfg;
struct net_device *ndev;
struct resource *res;
+ struct clk *clk_eth;
int tx_size, err, i;
struct ag71xx *ag;
@@ -1846,10 +1821,10 @@ static int ag71xx_probe(struct platform_device *pdev)
return -EINVAL;
}
- ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
- if (IS_ERR(ag->clk_eth)) {
+ clk_eth = devm_clk_get_enabled(&pdev->dev, "eth");
+ if (IS_ERR(clk_eth)) {
netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
- return PTR_ERR(ag->clk_eth);
+ return PTR_ERR(clk_eth);
}
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -1870,6 +1845,12 @@ static int ag71xx_probe(struct platform_device *pdev)
if (!ag->mac_base)
return -ENOMEM;
+/* Ensure the HW is in manual polling mode before the interrupt is
+ * requested. Otherwise ag71xx_interrupt might call napi_schedule
+ * before the NAPI context is initialized by netif_napi_add.
+ */
+ ag71xx_int_disable(ag, AG71XX_INT_POLL);
+
ndev->irq = platform_get_irq(pdev, 0);
err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
0x0, dev_name(&pdev->dev), ndev);
@@ -1912,6 +1893,8 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->stop_desc->next = (u32)ag->stop_desc_dma;
err = of_get_ethdev_address(np, ndev);
+ if (err == -EPROBE_DEFER)
+ return err;
if (err) {
netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
eth_hw_addr_random(ndev);
@@ -1926,33 +1909,27 @@ static int ag71xx_probe(struct platform_device *pdev)
netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll,
AG71XX_NAPI_WEIGHT);
- err = clk_prepare_enable(ag->clk_eth);
- if (err) {
- netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
- return err;
- }
-
ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
ag71xx_hw_init(ag);
err = ag71xx_mdio_probe(ag);
if (err)
- goto err_put_clk;
+ return err;
platform_set_drvdata(pdev, ndev);
err = ag71xx_phylink_setup(ag);
if (err) {
netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
- goto err_mdio_remove;
+ return err;
}
- err = register_netdev(ndev);
+ err = devm_register_netdev(&pdev->dev, ndev);
if (err) {
netif_err(ag, probe, ndev, "unable to register net device\n");
platform_set_drvdata(pdev, NULL);
- goto err_mdio_remove;
+ return err;
}
netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
@@ -1960,27 +1937,6 @@ static int ag71xx_probe(struct platform_device *pdev)
phy_modes(ag->phy_if_mode));
return 0;
-
-err_mdio_remove:
- ag71xx_mdio_remove(ag);
-err_put_clk:
- clk_disable_unprepare(ag->clk_eth);
- return err;
-}
-
-static void ag71xx_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct ag71xx *ag;
-
- if (!ndev)
- return;
-
- ag = netdev_priv(ndev);
- unregister_netdev(ndev);
- ag71xx_mdio_remove(ag);
- clk_disable_unprepare(ag->clk_eth);
- platform_set_drvdata(pdev, NULL);
}
static const u32 ar71xx_fifo_ar7100[] = {
@@ -2064,10 +2020,10 @@ static const struct of_device_id ag71xx_match[] = {
{ .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
{}
};
+MODULE_DEVICE_TABLE(of, ag71xx_match);
static struct platform_driver ag71xx_driver = {
.probe = ag71xx_probe,
- .remove_new = ag71xx_remove,
.driver = {
.name = "ag71xx",
.of_match_table = ag71xx_match,
@@ -2075,4 +2031,5 @@ static struct platform_driver ag71xx_driver = {
};
module_platform_driver(ag71xx_driver);
+MODULE_DESCRIPTION("Atheros AR71xx built-in ethernet mac driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index 20c6529ec135..297c2682a9cf 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -1300,9 +1300,9 @@ static void bcmasp_remove_intfs(struct bcmasp_priv *priv)
static int bcmasp_probe(struct platform_device *pdev)
{
- struct device_node *ports_node, *intf_node;
const struct bcmasp_plat_data *pdata;
struct device *dev = &pdev->dev;
+ struct device_node *ports_node;
struct bcmasp_priv *priv;
struct bcmasp_intf *intf;
int ret = 0, count = 0;
@@ -1374,12 +1374,11 @@ static int bcmasp_probe(struct platform_device *pdev)
}
i = 0;
- for_each_available_child_of_node(ports_node, intf_node) {
+ for_each_available_child_of_node_scoped(ports_node, intf_node) {
intf = bcmasp_interface_create(priv, intf_node, i);
if (!intf) {
dev_err(dev, "Cannot create eth interface %d\n", i);
bcmasp_remove_intfs(priv);
- of_node_put(intf_node);
ret = -ENOMEM;
goto of_put_exit;
}
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index 484fc2b5626f..ca163c8e3729 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "bcmasp_ethtool: " fmt
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index 82768b0e9026..9ea16ef4139d 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -322,6 +322,7 @@ static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Rewind so we do not have a hole */
spb_index = intf->tx_spb_index;
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
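Returning NETDEV_TX_OK tells the core the driver has consumed the skb, so an error path that unwinds and returns NETDEV_TX_OK without freeing it leaks the buffer - the dev_kfree_skb() added above and the dev_kfree_skb_any() in the bcmsysport hunk below both close that leak. A minimal sketch of the rule (example_map() is hypothetical):

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (example_map(skb) < 0) {	/* hypothetical DMA mapping step */
		dev->stats.tx_dropped++;
		/* we own the skb once we return NETDEV_TX_OK, so free it;
		 * the _any variant is safe in hard-irq context too */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* ... ring setup and doorbell ... */
	return NETDEV_TX_OK;
}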
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index c9faa8540859..0a68b526e4a8 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1359,6 +1359,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
skb->data, skb_len);
ret = NETDEV_TX_OK;
+ dev_kfree_skb_any(skb);
goto out;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index c7b56a5e5425..adf7b6b94941 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3640,16 +3640,12 @@ static int bnx2x_get_ts_info(struct net_device *dev,
if (bp->flags & PTP_SUPPORTED) {
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (bp->ptp_clock)
info->phc_index = ptp_clock_index(bp->ptp_clock);
- else
- info->phc_index = -1;
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 4e9215bce4ad..a018f251d198 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -868,6 +868,8 @@
#define DORQ_REG_VF_TYPE_VALUE_0 0x170258
#define DORQ_REG_VF_USAGE_CT_LIMIT 0x170340
+extern const u32 dmae_reg_go_c[];
+
/* [RW 4] Initial activity counter value on the load request; when the
shortcut is done. */
#define DORQ_REG_SHRT_ACT_CNT 0x170070
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 77d4cb4ad782..12198fc3ab22 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2652,10 +2652,10 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
/* vlan */
if (bulletin->valid_bitmap & (1 << VLAN_VALID))
/* vlan configured by ndo so its in bulletin board */
- memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
+ ivi->vlan = bulletin->vlan;
else
/* function has not been loaded yet. Show vlans as 0s */
- memset(&ivi->vlan, 0, VLAN_HLEN);
+ ivi->vlan = 0;
mutex_unlock(&bp->vfdb->bulletin_mutex);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 2bb133ae61c3..ba6729f2f9c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -23,8 +23,6 @@
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"
-extern const u32 dmae_reg_go_c[];
-
/* Statistics */
/*
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e27e1082ee33..6e422e24750a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -69,6 +69,7 @@
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
+#include "bnxt_coredump.h"
#include "bnxt_hwmon.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
@@ -301,10 +302,6 @@ static bool bnxt_vf_pciid(enum board_idx idx)
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
-#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
-
-#define BNXT_CP_DB_IRQ_DIS(db) \
- writel(DB_CP_IRQ_DIS_FLAGS, db)
#define BNXT_DB_CQ(db, idx) \
writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
@@ -2853,34 +2850,6 @@ static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
return TX_CMP_VALID(txcmp, raw_cons);
}
-static irqreturn_t bnxt_inta(int irq, void *dev_instance)
-{
- struct bnxt_napi *bnapi = dev_instance;
- struct bnxt *bp = bnapi->bp;
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- u32 cons = RING_CMP(cpr->cp_raw_cons);
- u32 int_status;
-
- prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
-
- if (!bnxt_has_work(bp, cpr)) {
- int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
- /* return if erroneous interrupt */
- if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
- return IRQ_NONE;
- }
-
- /* disable ring IRQ */
- BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
-
- /* Return here if interrupt is shared and is disabled. */
- if (unlikely(atomic_read(&bp->intr_sem) != 0))
- return IRQ_HANDLED;
-
- napi_schedule(&bnapi->napi);
- return IRQ_HANDLED;
-}
-
static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
int budget)
{
@@ -5056,7 +5025,7 @@ void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
list_del_init(&fltr->list);
}
-void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
+static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
{
struct bnxt_filter_base *usr_fltr, *tmp;
@@ -6579,7 +6548,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
req->lb_rule = cpu_to_le16(0xffff);
vnic_mru:
- req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
+ vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ req->mru = cpu_to_le16(vnic->mru);
req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
@@ -6715,6 +6685,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
+ bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
}
hwrm_req_drop(bp, req);
return rc;
@@ -6872,15 +6844,14 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
req->cq_handle = cpu_to_le64(ring->handle);
req->enables |= cpu_to_le32(
RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
- } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
+ } else {
req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
}
break;
case HWRM_RING_ALLOC_NQ:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
req->length = cpu_to_le32(bp->cp_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
break;
default:
netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
@@ -8943,6 +8914,80 @@ skip_rdma:
return 0;
}
+static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
+{
+ struct hwrm_dbg_crashdump_medium_cfg_input *req;
+ u16 page_attr;
+ int rc;
+
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
+ return 0;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
+ if (rc)
+ return rc;
+
+ if (BNXT_PAGE_SIZE == 0x2000)
+ page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
+ else if (BNXT_PAGE_SIZE == 0x10000)
+ page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
+ else
+ page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
+ req->pg_size_lvl = cpu_to_le16(page_attr |
+ bp->fw_crash_mem->ring_mem.depth);
+ req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
+ req->size = cpu_to_le32(bp->fw_crash_len);
+ req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
+ return hwrm_req_send(bp, req);
+}
+
+static void bnxt_free_crash_dump_mem(struct bnxt *bp)
+{
+ if (bp->fw_crash_mem) {
+ bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
+ kfree(bp->fw_crash_mem);
+ bp->fw_crash_mem = NULL;
+ }
+}
+
+static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
+{
+ u32 mem_size = 0;
+ int rc;
+
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
+ return 0;
+
+ rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
+ if (rc)
+ return rc;
+
+ mem_size = round_up(mem_size, 4);
+
+ /* keep and use the existing pages */
+ if (bp->fw_crash_mem &&
+ mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
+ goto alloc_done;
+
+ if (bp->fw_crash_mem)
+ bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
+ else
+ bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem),
+ GFP_KERNEL);
+ if (!bp->fw_crash_mem)
+ return -ENOMEM;
+
+ rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
+ if (rc) {
+ bnxt_free_crash_dump_mem(bp);
+ return rc;
+ }
+
+alloc_done:
+ bp->fw_crash_len = mem_size;
+ return 0;
+}
+
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
{
struct hwrm_func_resource_qcaps_output *resp;
@@ -9118,6 +9163,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
@@ -10089,6 +10136,26 @@ vnic_setup_err:
return rc;
}
+int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u8 valid)
+{
+ struct hwrm_vnic_update_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
+ if (rc)
+ return rc;
+
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+ if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
+ req->mru = cpu_to_le16(vnic->mru);
+
+ req->enables = cpu_to_le32(valid);
+
+ return hwrm_req_send(bp, req);
+}
+
int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc;
@@ -10248,7 +10315,7 @@ static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
}
}
-void bnxt_clear_rss_ctxs(struct bnxt *bp)
+static void bnxt_clear_rss_ctxs(struct bnxt *bp)
{
struct ethtool_rxfh_context *ctx;
unsigned long context;
@@ -10553,22 +10620,32 @@ static void bnxt_setup_msix(struct bnxt *bp)
}
}
-static void bnxt_setup_inta(struct bnxt *bp)
+static int bnxt_init_int_mode(struct bnxt *bp);
+
+static int bnxt_change_msix(struct bnxt *bp, int total)
{
- const int len = sizeof(bp->irq_tbl[0].name);
+ struct msi_map map;
+ int i;
- if (bp->num_tc) {
- netdev_reset_tc(bp->dev);
- bp->num_tc = 0;
+ /* add MSIX to the end if needed */
+ for (i = bp->total_irqs; i < total; i++) {
+ map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
+ if (map.index < 0)
+ return bp->total_irqs;
+ bp->irq_tbl[i].vector = map.virq;
+ bp->total_irqs++;
}
- snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
- 0);
- bp->irq_tbl[0].handler = bnxt_inta;
+ /* trim MSIX from the end if needed */
+ for (i = bp->total_irqs; i > total; i--) {
+ map.index = i - 1;
+ map.virq = bp->irq_tbl[i - 1].vector;
+ pci_msix_free_irq(bp->pdev, map);
+ bp->total_irqs--;
+ }
+ return bp->total_irqs;
}
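bnxt_change_msix() relies on the dynamic MSI-X API: pci_msix_alloc_irq_at() grows the allocation one vector at a time and returns an msi_map whose index is negative on failure, while pci_msix_free_irq() releases individual vectors, so the function always reports the number of vectors actually held. The callers gate this path on pci_msix_can_alloc_dyn(). A short sketch of growing by one vector, assuming only the PCI/MSI API (example_wire_up() is hypothetical):

#include <linux/pci.h>
#include <linux/msi_api.h>

static int example_grow_msix(struct pci_dev *pdev)
{
	struct msi_map map;

	if (!pci_msix_can_alloc_dyn(pdev))
		return -EOPNOTSUPP;

	/* allocate one additional vector at any free table index */
	map = pci_msix_alloc_irq_at(pdev, MSI_ANY_INDEX, NULL);
	if (map.index < 0)
		return map.index;

	return example_wire_up(map.index, map.virq);
}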
-static int bnxt_init_int_mode(struct bnxt *bp);
-
static int bnxt_setup_int_mode(struct bnxt *bp)
{
int rc;
@@ -10579,10 +10656,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
return rc ?: -ENODEV;
}
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- bnxt_setup_msix(bp);
- else
- bnxt_setup_inta(bp);
+ bnxt_setup_msix(bp);
rc = bnxt_set_real_num_queues(bp);
return rc;
@@ -10670,10 +10744,9 @@ static int bnxt_get_num_msix(struct bnxt *bp)
return bnxt_nq_rings_in_use(bp);
}
-static int bnxt_init_msix(struct bnxt *bp)
+static int bnxt_init_int_mode(struct bnxt *bp)
{
- int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp;
- struct msix_entry *msix_ent;
+ int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
total_vecs = bnxt_get_num_msix(bp);
max = bnxt_get_max_func_irqs(bp);
@@ -10683,29 +10756,24 @@ static int bnxt_init_msix(struct bnxt *bp)
if (!total_vecs)
return 0;
- msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
- if (!msix_ent)
- return -ENOMEM;
-
- for (i = 0; i < total_vecs; i++) {
- msix_ent[i].entry = i;
- msix_ent[i].vector = 0;
- }
-
if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
min = 2;
- total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
+ total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
+ PCI_IRQ_MSIX);
ulp_msix = bnxt_get_ulp_msix_num(bp);
if (total_vecs < 0 || total_vecs < ulp_msix) {
rc = -ENODEV;
goto msix_setup_exit;
}
- bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
+ tbl_size = total_vecs;
+ if (pci_msix_can_alloc_dyn(bp->pdev))
+ tbl_size = max;
+ bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL);
if (bp->irq_tbl) {
for (i = 0; i < total_vecs; i++)
- bp->irq_tbl[i].vector = msix_ent[i].vector;
+ bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
bp->total_irqs = total_vecs;
/* Trim rings based upon num of vectors allocated */
@@ -10723,61 +10791,28 @@ static int bnxt_init_msix(struct bnxt *bp)
rc = -ENOMEM;
goto msix_setup_exit;
}
- bp->flags |= BNXT_FLAG_USING_MSIX;
- kfree(msix_ent);
return 0;
msix_setup_exit:
- netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
+ netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
kfree(bp->irq_tbl);
bp->irq_tbl = NULL;
- pci_disable_msix(bp->pdev);
- kfree(msix_ent);
- return rc;
-}
-
-static int bnxt_init_inta(struct bnxt *bp)
-{
- bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
- if (!bp->irq_tbl)
- return -ENOMEM;
-
- bp->total_irqs = 1;
- bp->rx_nr_rings = 1;
- bp->tx_nr_rings = 1;
- bp->cp_nr_rings = 1;
- bp->flags |= BNXT_FLAG_SHARED_RINGS;
- bp->irq_tbl[0].vector = bp->pdev->irq;
- return 0;
-}
-
-static int bnxt_init_int_mode(struct bnxt *bp)
-{
- int rc = -ENODEV;
-
- if (bp->flags & BNXT_FLAG_MSIX_CAP)
- rc = bnxt_init_msix(bp);
-
- if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
- /* fallback to INTA */
- rc = bnxt_init_inta(bp);
- }
+ pci_free_irq_vectors(bp->pdev);
return rc;
}
static void bnxt_clear_int_mode(struct bnxt *bp)
{
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- pci_disable_msix(bp->pdev);
+ pci_free_irq_vectors(bp->pdev);
kfree(bp->irq_tbl);
bp->irq_tbl = NULL;
- bp->flags &= ~BNXT_FLAG_USING_MSIX;
}
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
bool irq_cleared = false;
+ bool irq_change = false;
int tcs = bp->num_tc;
int irqs_required;
int rc;
@@ -10796,15 +10831,21 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
}
if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
- bnxt_ulp_irq_stop(bp);
- bnxt_clear_int_mode(bp);
- irq_cleared = true;
+ irq_change = true;
+ if (!pci_msix_can_alloc_dyn(bp->pdev)) {
+ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
+ irq_cleared = true;
+ }
}
rc = __bnxt_reserve_rings(bp);
if (irq_cleared) {
if (!rc)
rc = bnxt_init_int_mode(bp);
bnxt_ulp_irq_restart(bp, rc);
+ } else if (irq_change && !rc) {
+ if (bnxt_change_msix(bp, irqs_required) != irqs_required)
+ rc = -ENOSPC;
}
if (rc) {
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
@@ -10870,9 +10911,6 @@ static int bnxt_request_irq(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
rmap = bp->dev->rx_cpu_rmap;
#endif
- if (!(bp->flags & BNXT_FLAG_USING_MSIX))
- flags = IRQF_SHARED;
-
for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
int map_idx = bnxt_cp_num_to_irq_num(bp, i);
struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
@@ -10937,29 +10975,22 @@ static void bnxt_del_napi(struct bnxt *bp)
static void bnxt_init_napi(struct bnxt *bp)
{
- int i;
+ int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
unsigned int cp_nr_rings = bp->cp_nr_rings;
struct bnxt_napi *bnapi;
+ int i;
- if (bp->flags & BNXT_FLAG_USING_MSIX) {
- int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
-
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- poll_fn = bnxt_poll_p5;
- else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
- cp_nr_rings--;
- for (i = 0; i < cp_nr_rings; i++) {
- bnapi = bp->bnapi[i];
- netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
- }
- if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
- bnapi = bp->bnapi[cp_nr_rings];
- netif_napi_add(bp->dev, &bnapi->napi,
- bnxt_poll_nitroa0);
- }
- } else {
- bnapi = bp->bnapi[0];
- netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ poll_fn = bnxt_poll_p5;
+ else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ cp_nr_rings--;
+ for (i = 0; i < cp_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
+ }
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ bnapi = bp->bnapi[cp_nr_rings];
+ netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
}
}
@@ -11947,20 +11978,6 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
return rc;
}
-/* Common routine to pre-map certain register block to different GRC window.
- * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
- * in PF and 3 windows in VF that can be customized to map in different
- * register blocks.
- */
-static void bnxt_preset_reg_win(struct bnxt *bp)
-{
- if (BNXT_PF(bp)) {
- /* CAG registers map to GRC window #4 */
- writel(BNXT_CAG_REG_BASE,
- bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
- }
-}
-
static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
static int bnxt_reinit_after_abort(struct bnxt *bp)
@@ -12065,7 +12082,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
- bnxt_preset_reg_win(bp);
netif_carrier_off(bp->dev);
if (irq_re_init) {
/* Reserve rings now if none were reserved at driver probe. */
@@ -12078,12 +12094,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = bnxt_reserve_rings(bp, irq_re_init);
if (rc)
return rc;
- if ((bp->flags & BNXT_FLAG_RFS) &&
- !(bp->flags & BNXT_FLAG_USING_MSIX)) {
- /* disable RFS if falling back to INTA */
- bp->dev->hw_features &= ~NETIF_F_NTUPLE;
- bp->flags &= ~BNXT_FLAG_RFS;
- }
rc = bnxt_alloc_mem(bp, irq_re_init);
if (rc) {
@@ -12810,7 +12820,7 @@ bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
!BNXT_SUPPORTS_NTUPLE_VNIC(bp))
return bnxt_rfs_supported(bp);
- if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
+ if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
hwr.grp = bp->rx_nr_rings;
@@ -13793,6 +13803,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
struct bnxt_hw_rings hwr = {0};
int rx_rings = rx;
+ int rc;
if (tcs)
tx_sets = tcs;
@@ -13825,7 +13836,23 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
}
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
hwr.cp_p5 = hwr.tx + rx;
- return bnxt_hwrm_check_rings(bp, &hwr);
+ rc = bnxt_hwrm_check_rings(bp, &hwr);
+ if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
+ if (!bnxt_ulp_registered(bp->edev)) {
+ hwr.cp += bnxt_get_ulp_msix_num(bp);
+ hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
+ }
+ if (hwr.cp > bp->total_irqs) {
+ int total_msix = bnxt_change_msix(bp, hwr.cp);
+
+ if (total_msix < hwr.cp) {
+ netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
+ hwr.cp, total_msix);
+ rc = -ENOSPC;
+ }
+ }
+ }
+ return rc;
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -13963,6 +13990,19 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
if (rc)
return -ENODEV;
+ rc = bnxt_alloc_crash_dump_mem(bp);
+ if (rc)
+ netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
+ rc);
+ if (!rc) {
+ rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
+ if (rc) {
+ bnxt_free_crash_dump_mem(bp);
+ netdev_warn(bp->dev,
+ "hwrm crash dump mem failure rc: %d\n", rc);
+ }
+ }
+
if (bnxt_fw_pre_resv_vnics(bp))
bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
@@ -15154,7 +15194,8 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rx_ring_info *rxr, *clone;
struct bnxt_cp_ring_info *cpr;
- int rc;
+ struct bnxt_vnic_info *vnic;
+ int i, rc;
rxr = &bp->rx_ring[idx];
clone = qmem;
@@ -15179,11 +15220,16 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
if (bp->flags & BNXT_FLAG_AGG_RINGS)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
- napi_enable(&rxr->bnapi->napi);
-
cpr = &rxr->bnapi->cp_ring;
cpr->sw_stats->rx.rx_resets++;
+ for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ vnic = &bp->vnic_info[i];
+ vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ bnxt_hwrm_vnic_update(bp, vnic,
+ VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+ }
+
return 0;
err_free_hwrm_rx_ring:
@@ -15195,9 +15241,17 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rx_ring_info *rxr;
+ struct bnxt_vnic_info *vnic;
+ int i;
+
+ for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ vnic = &bp->vnic_info[i];
+ vnic->mru = 0;
+ bnxt_hwrm_vnic_update(bp, vnic,
+ VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+ }
rxr = &bp->rx_ring[idx];
- napi_disable(&rxr->bnapi->napi);
bnxt_hwrm_rx_ring_free(bp, rxr, false);
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
rxr->rx_next_cons = 0;
@@ -15257,6 +15311,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
+ bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
bnxt_free_port_stats(bp);
@@ -15644,6 +15699,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_is_bridge(pdev))
return -ENODEV;
+ if (!pdev->msix_cap) {
+ dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
+ return -ENODEV;
+ }
+
/* Clear any pending DMA transactions from crash kernel
* while loading driver in capture kernel.
*/
@@ -15670,9 +15730,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_PF(bp))
SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
- if (pdev->msix_cap)
- bp->flags |= BNXT_FLAG_MSIX_CAP;
-
rc = bnxt_init_board(pdev, dev);
if (rc < 0)
goto init_err_free;
@@ -15681,7 +15738,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->stat_ops = &bnxt_stat_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
- dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
pci_set_drvdata(pdev, dev);
rc = bnxt_alloc_hwrm_resources(bp);
@@ -15862,6 +15918,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
+ if (BNXT_SUPPORTS_QUEUE_API(bp))
+ dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
rc = register_netdev(dev);
if (rc)
@@ -15895,6 +15953,7 @@ init_err_pci_clean:
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
+ bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
@@ -15986,6 +16045,8 @@ static int bnxt_resume(struct device *device)
rc = -ENODEV;
goto resume_exit;
}
+ if (bp->fw_crash_mem)
+ bnxt_hwrm_crash_dump_mem_cfg(bp);
bnxt_get_wol_settings(bp);
if (netif_running(dev)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 6bbdc718c3a7..69231e85140b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1217,12 +1217,15 @@ struct bnxt_napi {
bool in_reset;
};
+/* "TxRx", 2 hypens, plus maximum integer */
+#define BNXT_IRQ_NAME_EXTRA 17
+
struct bnxt_irq {
irq_handler_t handler;
unsigned int vector;
u8 requested:1;
u8 have_cpumask:1;
- char name[IFNAMSIZ + 2];
+ char name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA];
cpumask_var_t cpu_mask;
};
@@ -1250,6 +1253,7 @@ struct bnxt_vnic_info {
#define BNXT_MAX_CTX_PER_VNIC 8
u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
u16 fw_l2_ctx_id;
+ u16 mru;
#define BNXT_MAX_UC_ADDRS 4
struct bnxt_l2_filter *l2_filters[BNXT_MAX_UC_ADDRS];
/* index 0 always dev_addr */
@@ -1355,7 +1359,6 @@ struct bnxt_vf_info {
u16 vlan;
u16 func_qcfg_flags;
u32 flags;
-#define BNXT_VF_QOS 0x1
#define BNXT_VF_SPOOFCHK 0x2
#define BNXT_VF_LINK_FORCED 0x4
#define BNXT_VF_LINK_UP 0x8
@@ -1755,8 +1758,6 @@ struct bnxt_test_info {
#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
-#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
-#define BNXT_CAG_REG_BASE 0x300000
#define BNXT_GRC_REG_STATUS_P5 0x520
@@ -2199,8 +2200,6 @@ struct bnxt {
#define BNXT_FLAG_STRIP_VLAN 0x20
#define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
BNXT_FLAG_LRO)
- #define BNXT_FLAG_USING_MSIX 0x40
- #define BNXT_FLAG_MSIX_CAP 0x80
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
@@ -2437,6 +2436,7 @@ struct bnxt {
#define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(37)
#define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38)
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
+ #define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40)
u32 fw_dbg_cap;
@@ -2449,6 +2449,9 @@ struct bnxt {
#define BNXT_SUPPORTS_MULTI_RSS_CTX(bp) \
(BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
((bp)->rss_cap & BNXT_RSS_CAP_MULTI_RSS_CTX))
+#define BNXT_SUPPORTS_QUEUE_API(bp) \
+ (BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
+ ((bp)->fw_cap & BNXT_FW_CAP_VNIC_RE_FLUSH))
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
@@ -2644,6 +2647,9 @@ struct bnxt {
#endif
u32 thermal_threshold_type;
enum board_idx board_idx;
+
+ struct bnxt_ctx_pg_info *fw_crash_mem;
+ u32 fw_crash_len;
};
#define BNXT_NUM_RX_RING_STATS 8
@@ -2790,7 +2796,6 @@ void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
-void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
int bmap_size, bool async_only);
int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
@@ -2838,11 +2843,12 @@ int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
+int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u8 valid);
int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
bool all);
-void bnxt_clear_rss_ctxs(struct bnxt *bp);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
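BNXT_IRQ_NAME_EXTRA above accounts for the worst case appended to an interface name: "TxRx" (4 bytes), two hyphens (2), up to 10 digits for a positive int ring index, and the terminating NUL, i.e. 4 + 2 + 10 + 1 = 17. A standalone sketch showing the bound holds even for a 15-character interface name; the "%s-%s-%d" format and the sample name are assumptions for illustration, and IFNAMSIZ (16 on Linux) comes from <net/if.h>.

#include <limits.h>
#include <net/if.h>	/* IFNAMSIZ, 16 on Linux */
#include <stdio.h>

#define BNXT_IRQ_NAME_EXTRA 17	/* "TxRx" + 2 hyphens + 10 digits + NUL */

int main(void)
{
	char name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA];
	/* worst case: 15-char ifname, "TxRx", INT_MAX ring index */
	int n = snprintf(name, sizeof(name), "%s-%s-%d",
			 "enp130s0f0np012", "TxRx", INT_MAX);

	printf("%s: %d chars, buffer %zu\n", name, n, sizeof(name));
	return 0;
}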
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index c06789882036..4e2b938ed1f7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -372,20 +372,81 @@ err:
return rc;
}
+static u32 bnxt_copy_crash_data(struct bnxt_ring_mem_info *rmem, void *buf,
+ u32 dump_len)
+{
+ u32 data_copied = 0;
+ u32 data_len;
+ int i;
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ data_len = rmem->page_size;
+ if (data_copied + data_len > dump_len)
+ data_len = dump_len - data_copied;
+ memcpy(buf + data_copied, rmem->pg_arr[i], data_len);
+ data_copied += data_len;
+ if (data_copied >= dump_len)
+ break;
+ }
+ return data_copied;
+}
+
+static int bnxt_copy_crash_dump(struct bnxt *bp, void *buf, u32 dump_len)
+{
+ struct bnxt_ring_mem_info *rmem;
+ u32 offset = 0;
+
+ if (!bp->fw_crash_mem)
+ return -ENOENT;
+
+ rmem = &bp->fw_crash_mem->ring_mem;
+
+ if (rmem->depth > 1) {
+ int i;
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ struct bnxt_ctx_pg_info *pg_tbl;
+
+ pg_tbl = bp->fw_crash_mem->ctx_pg_tbl[i];
+ offset += bnxt_copy_crash_data(&pg_tbl->ring_mem,
+ buf + offset,
+ dump_len - offset);
+ if (offset >= dump_len)
+ break;
+ }
+ } else {
+ bnxt_copy_crash_data(rmem, buf, dump_len);
+ }
+
+ return 0;
+}
+
+static bool bnxt_crash_dump_avail(struct bnxt *bp)
+{
+ u32 sig = 0;
+
+ /* The first 4 bytes (signature) of the crash dump are always non-zero */
+ bnxt_copy_crash_dump(bp, &sig, sizeof(sig));
+ return !!sig;
+}
+
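bnxt_copy_crash_data() above linearizes a ring of fixed-size pages into the caller's buffer, clamping the last page so at most dump_len bytes are written; bnxt_copy_crash_dump() adds one level of indirection for depth-2 page tables. A self-contained sketch of the same paged-copy pattern; copy_paged() and its parameters are illustrative, not driver API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t copy_paged(uint8_t **pages, int nr_pages, uint32_t page_size,
			   uint8_t *buf, uint32_t dump_len)
{
	uint32_t copied = 0;

	for (int i = 0; i < nr_pages && copied < dump_len; i++) {
		uint32_t len = page_size;

		if (copied + len > dump_len)
			len = dump_len - copied;	/* clamp the final page */
		memcpy(buf + copied, pages[i], len);
		copied += len;
	}
	return copied;
}

int main(void)
{
	uint8_t p0[] = { 'A', 'A', 'A', 'A' }, p1[] = { 'B', 'B', 'B', 'B' };
	uint8_t buf[6];
	uint8_t *pages[] = { p0, p1 };

	/* dump_len (6) is not a multiple of page_size (4): last copy is clamped */
	printf("copied %u bytes\n", copy_paged(pages, 2, 4, buf, sizeof(buf)));
	return 0;
}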
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len)
{
if (dump_type == BNXT_DUMP_CRASH) {
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)
+ return bnxt_copy_crash_dump(bp, buf, *dump_len);
#ifdef CONFIG_TEE_BNXT_FW
- return tee_bnxt_copy_coredump(buf, 0, *dump_len);
-#else
- return -EOPNOTSUPP;
+ else if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+ return tee_bnxt_copy_coredump(buf, 0, *dump_len);
#endif
+ else
+ return -EOPNOTSUPP;
} else {
return __bnxt_get_coredump(bp, buf, dump_len);
}
}
-static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
+int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
{
struct hwrm_dbg_qcfg_output *resp;
struct hwrm_dbg_qcfg_input *req;
@@ -395,7 +456,8 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
return -EOPNOTSUPP;
if (dump_type == BNXT_DUMP_CRASH &&
- !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR))
+ !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR ||
+ (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)))
return -EOPNOTSUPP;
rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG);
@@ -403,8 +465,12 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
return rc;
req->fid = cpu_to_le16(0xffff);
- if (dump_type == BNXT_DUMP_CRASH)
- req->flags = cpu_to_le16(DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR);
+ if (dump_type == BNXT_DUMP_CRASH) {
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+ req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_SOC);
+ else
+ req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_HOST);
+ }
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
@@ -412,7 +478,10 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
goto get_dump_len_exit;
if (dump_type == BNXT_DUMP_CRASH) {
- *dump_len = le32_to_cpu(resp->crashdump_size);
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+ *dump_len = BNXT_CRASH_DUMP_LEN;
+ else
+ *dump_len = le32_to_cpu(resp->crashdump_size);
} else {
/* Driver adds coredump header and "HWRM_VER_GET response"
* segment additionally to coredump.
@@ -434,10 +503,17 @@ u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type)
{
u32 len = 0;
+ if (dump_type == BNXT_DUMP_CRASH &&
+ bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR &&
+ bp->fw_crash_mem) {
+ if (!bnxt_crash_dump_avail(bp))
+ return 0;
+
+ return bp->fw_crash_len;
+ }
+
if (bnxt_hwrm_get_dump_len(bp, dump_type, &len)) {
- if (dump_type == BNXT_DUMP_CRASH)
- len = BNXT_CRASH_DUMP_LEN;
- else
+ if (dump_type != BNXT_DUMP_CRASH)
__bnxt_get_coredump(bp, NULL, &len);
}
return len;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
index b1a1b2fffb19..a76d5c281413 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -111,7 +111,15 @@ struct hwrm_dbg_cmn_output {
#define HWRM_DBG_CMN_FLAGS_MORE 1
};
+#define BNXT_DBG_FL_CR_DUMP_SIZE_SOC \
+ DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
+#define BNXT_DBG_FL_CR_DUMP_SIZE_HOST \
+ DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR
+#define BNXT_DBG_CR_DUMP_MDM_CFG_DDR \
+ DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR
+
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len);
+int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len);
u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
index 156c2404854f..127b7015f676 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
@@ -64,9 +64,9 @@ static const struct file_operations debugfs_dim_fops = {
static void debugfs_dim_ring_init(struct dim *dim, int ring_idx,
struct dentry *dd)
{
- static char qname[16];
+ static char qname[12];
- snprintf(qname, 10, "%d", ring_idx);
+ snprintf(qname, sizeof(qname), "%d", ring_idx);
debugfs_create_file(qname, 0600, dd, dim, &debugfs_dim_fops);
}
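The debugfs fix sizes qname for the worst case: the longest decimal int, "-2147483648", is 11 characters plus the NUL, hence 12 bytes, and passing sizeof(qname) rather than a hard-coded 10 keeps the bound tied to the buffer. A quick standalone check:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	char qname[12];		/* "-2147483648" = 11 chars + NUL */
	int n = snprintf(qname, sizeof(qname), "%d", INT_MIN);

	printf("\"%s\" needs %d bytes\n", qname, n + 1);	/* 12 */
	return 0;
}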
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 9dadc89378f0..f71cc8188b4e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -955,11 +955,6 @@ static int bnxt_set_channels(struct net_device *dev,
}
tx_xdp = req_rx_rings;
}
- rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
- if (rc) {
- netdev_warn(dev, "Unable to allocate the requested rings\n");
- return rc;
- }
if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
@@ -968,9 +963,12 @@ static int bnxt_set_channels(struct net_device *dev,
return -EINVAL;
}
- bnxt_clear_usr_fltrs(bp, true);
- if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
- bnxt_clear_rss_ctxs(bp);
+ rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
+ if (rc) {
+ netdev_warn(dev, "Unable to allocate the requested rings\n");
+ return rc;
+ }
+
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
@@ -2000,7 +1998,6 @@ static int bnxt_set_rxfh(struct net_device *dev,
bnxt_modify_rss(bp, NULL, NULL, rxfh);
- bnxt_clear_usr_fltrs(bp, false);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -4161,7 +4158,7 @@ static void bnxt_get_pkgver(struct net_device *dev)
if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
len = strlen(bp->fw_ver_str);
- snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
+ snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len,
"/pkg %s", buf);
}
}
@@ -4993,9 +4990,16 @@ static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
return -EINVAL;
}
- if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
- netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
- return -EOPNOTSUPP;
+ if (dump->flag == BNXT_DUMP_CRASH) {
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR &&
+ (!IS_ENABLED(CONFIG_TEE_BNXT_FW))) {
+ netdev_info(dev,
+ "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
+ return -EOPNOTSUPP;
+ } else if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) {
+ netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n");
+ return -EOPNOTSUPP;
+ }
}
bp->dump_flag = dump->flag;
@@ -5040,11 +5044,8 @@ static int bnxt_get_ts_info(struct net_device *dev,
struct bnxt_ptp_cfg *ptp;
ptp = bp->ptp_cfg;
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
- info->phc_index = -1;
if (!ptp)
return 0;
@@ -5289,7 +5290,7 @@ void bnxt_ethtool_free(struct bnxt *bp)
const struct ethtool_ops bnxt_ethtool_ops = {
.cap_link_lanes_supported = 1,
- .cap_rss_ctx_supported = 1,
+ .rxfh_per_ctx_key = 1,
.rxfh_max_num_contexts = BNXT_MAX_ETH_RSS_CTX + 1,
.rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5,
.rxfh_priv_size = sizeof(struct bnxt_rss_ctx),
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index f219709f9563..f8ef6f1a1964 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -403,6 +403,9 @@ struct cmd_nums {
#define HWRM_FUNC_LAG_UPDATE 0x1b1UL
#define HWRM_FUNC_LAG_FREE 0x1b2UL
#define HWRM_FUNC_LAG_QCFG 0x1b3UL
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_ADD 0x1c2UL
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_DELETE 0x1c3UL
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_QUERY 0x1c4UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -430,6 +433,9 @@ struct cmd_nums {
#define HWRM_STAT_GENERIC_QSTATS 0x218UL
#define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
#define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
+ #define HWRM_MFG_TESTS 0x21bUL
+ #define HWRM_PORT_POE_CFG 0x230UL
+ #define HWRM_PORT_POE_QCFG 0x231UL
#define HWRM_UDCC_QCAPS 0x258UL
#define HWRM_UDCC_CFG 0x259UL
#define HWRM_UDCC_QCFG 0x25aUL
@@ -439,6 +445,9 @@ struct cmd_nums {
#define HWRM_UDCC_COMP_CFG 0x25eUL
#define HWRM_UDCC_COMP_QCFG 0x25fUL
#define HWRM_UDCC_COMP_QUERY 0x260UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x261UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x262UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x263UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -500,10 +509,8 @@ struct cmd_nums {
#define HWRM_TFC_IF_TBL_GET 0x399UL
#define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
#define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
- #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x39cUL
- #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x39dUL
- #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x39eUL
#define HWRM_SV 0x400UL
+ #define HWRM_DBG_SERDES_TEST 0xff0eUL
#define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
@@ -533,6 +540,9 @@ struct cmd_nums {
#define HWRM_DBG_USEQ_RUN 0xff29UL
#define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
#define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
+ #define HWRM_DBG_COREDUMP_CAPTURE 0xff2cUL
+ #define HWRM_DBG_PTRACE 0xff2dUL
+ #define HWRM_DBG_SIM_CABLE_STATE 0xff2eUL
#define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL
#define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL
#define HWRM_NVM_DEFRAG 0xffecUL
@@ -582,6 +592,7 @@ struct ret_codes {
#define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
#define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
#define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL
+ #define HWRM_ERR_CODE_SECURE_SOC_ERROR 0x14UL
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
@@ -613,8 +624,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 44
-#define HWRM_VERSION_STR "1.10.3.44"
+#define HWRM_VERSION_RSVD 68
+#define HWRM_VERSION_STR "1.10.3.68"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -850,7 +861,10 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
#define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
#define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_REPRESENTOR_PAIR_CHANGE 0x4eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_STAT_CHANGE 0x4fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HOST_COREDUMP 0x50UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x51UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1691,7 +1705,7 @@ struct hwrm_func_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_qcaps_output (size:1088b/136B) */
+/* hwrm_func_qcaps_output (size:1152b/144B) */
struct hwrm_func_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1824,6 +1838,9 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_VF_STAT_EJECTION_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HOST_COREDUMP_SUPPORTED 0x80000000UL
__le16 tunnel_disable_flag;
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
@@ -1845,7 +1862,9 @@ struct hwrm_func_qcaps_output {
__le32 roce_vf_max_qp;
__le32 roce_vf_max_srq;
__le32 roce_vf_max_gid;
- u8 unused_3[3];
+ __le32 flags_ext3;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
+ u8 unused_3[7];
u8 valid;
};
@@ -2021,7 +2040,8 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
__le16 host_mtu;
- u8 unused_3[2];
+ __le16 flags2;
+ #define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL
u8 unused_4[2];
u8 port_kdnet_mode;
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
@@ -3671,33 +3691,38 @@ struct hwrm_func_backing_store_cfg_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
__le16 instance;
__le32 flags;
#define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
@@ -3772,6 +3797,11 @@ struct hwrm_func_backing_store_qcfg_v2_input {
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
__le16 instance;
@@ -3785,29 +3815,34 @@ struct hwrm_func_backing_store_qcfg_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
__le16 instance;
__le32 flags;
__le64 page_dir;
@@ -3883,6 +3918,13 @@ struct ts_split_entries {
__le32 rsvd2[2];
};
+/* ck_split_entries (size:128b/16B) */
+struct ck_split_entries {
+ __le32 num_quic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
struct hwrm_func_backing_store_qcaps_v2_input {
__le16 req_type;
@@ -3891,33 +3933,38 @@ struct hwrm_func_backing_store_qcaps_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
u8 rsvd[6];
};
@@ -3928,39 +3975,45 @@ struct hwrm_func_backing_store_qcaps_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
__le16 entry_size;
__le32 flags;
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE 0x10UL
__le32 instance_bit_map;
u8 ctx_init_value;
u8 ctx_init_offset;
@@ -4410,6 +4463,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_OVERHEATED 0x6UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
__le32 preemphasis;
@@ -4941,7 +4995,9 @@ struct hwrm_port_qstats_output {
__le16 resp_len;
__le16 tx_stat_size;
__le16 rx_stat_size;
- u8 unused_0[3];
+ u8 flags;
+ #define PORT_QSTATS_RESP_FLAGS_CLEARED 0x1UL
+ u8 unused_0[2];
u8 valid;
};
@@ -5074,6 +5130,7 @@ struct hwrm_port_qstats_ext_output {
__le16 total_active_cos_queues;
u8 flags;
#define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEARED 0x2UL
u8 valid;
};
@@ -6510,6 +6567,43 @@ struct hwrm_vnic_alloc_output {
u8 valid;
};
+/* hwrm_vnic_update_input (size:256b/32B) */
+struct hwrm_vnic_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 enables;
+ #define VNIC_UPDATE_REQ_ENABLES_VNIC_STATE_VALID 0x1UL
+ #define VNIC_UPDATE_REQ_ENABLES_MRU_VALID 0x2UL
+ #define VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID 0x4UL
+ u8 vnic_state;
+ #define VNIC_UPDATE_REQ_VNIC_STATE_NORMAL 0x0UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_DROP 0x1UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_LAST VNIC_UPDATE_REQ_VNIC_STATE_DROP
+ u8 metadata_format_type;
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_0 0x0UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_1 0x1UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_2 0x2UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_3 0x3UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 0x4UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_LAST VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4
+ __le16 mru;
+ u8 unused_1[4];
+};
+
+/* hwrm_vnic_update_output (size:128b/16B) */
+struct hwrm_vnic_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_vnic_free_input (size:192b/24B) */
struct hwrm_vnic_free_input {
__le16 req_type;
@@ -6640,6 +6734,7 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL
#define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL
#define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP 0x40000000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -7484,23 +7579,24 @@ struct hwrm_cfa_l2_filter_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP (0x3UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP
__le32 enables;
#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
#define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
@@ -8766,7 +8862,7 @@ struct ctx_hw_stats_ext {
__le64 rx_tpa_events;
};
-/* hwrm_stat_ctx_alloc_input (size:320b/40B) */
+/* hwrm_stat_ctx_alloc_input (size:384b/48B) */
struct hwrm_stat_ctx_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -8776,13 +8872,16 @@ struct hwrm_stat_ctx_alloc_input {
__le64 stats_dma_addr;
__le32 update_period_ms;
u8 stat_ctx_flags;
- #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_DUP_HOST_BUF 0x2UL
u8 unused_0;
__le16 stats_dma_length;
__le16 flags;
#define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL
__le16 steering_tag;
- __le32 unused_1;
+ __le32 stat_ctx_id;
+ __le16 alloc_seq_id;
+ u8 unused_1[6];
};
/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
@@ -9650,10 +9749,13 @@ struct hwrm_dbg_qcaps_output {
__le32 coredump_component_disable_caps;
#define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
__le32 flags;
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
- #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
+ #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_DDR 0x10UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_CAPTURE 0x20UL
+ #define DBG_QCAPS_RESP_FLAGS_PTRACE 0x40UL
u8 unused_1[3];
u8 valid;
};
@@ -10092,16 +10194,19 @@ struct hwrm_nvm_erase_dir_entry_output {
u8 valid;
};
-/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
+/* hwrm_nvm_get_dev_info_input (size:192b/24B) */
struct hwrm_nvm_get_dev_info_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL
+ u8 unused_0[7];
};
-/* hwrm_nvm_get_dev_info_output (size:704b/88B) */
+/* hwrm_nvm_get_dev_info_output (size:768b/96B) */
struct hwrm_nvm_get_dev_info_output {
__le16 error_code;
__le16 req_type;
@@ -10135,6 +10240,10 @@ struct hwrm_nvm_get_dev_info_output {
__le16 netctrl_fw_minor;
__le16 netctrl_fw_build;
__le16 netctrl_fw_patch;
+ __le16 srt2_fw_major;
+ __le16 srt2_fw_minor;
+ __le16 srt2_fw_build;
+ __le16 srt2_fw_patch;
u8 unused_0[7];
u8 valid;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 22898d3d088b..7bb8a5d74430 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -15,6 +15,7 @@
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
+#include <net/dcbnl.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
@@ -196,11 +197,8 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
ivi->max_tx_rate = vf->max_tx_rate;
ivi->min_tx_rate = vf->min_tx_rate;
- ivi->vlan = vf->vlan;
- if (vf->flags & BNXT_VF_QOS)
- ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
- else
- ivi->qos = 0;
+ ivi->vlan = vf->vlan & VLAN_VID_MASK;
+ ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
ivi->trusted = bnxt_is_trusted_vf(bp, vf);
if (!(vf->flags & BNXT_VF_LINK_FORCED))
@@ -256,21 +254,21 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
if (bp->hwrm_spec_code < 0x10201)
return -ENOTSUPP;
- if (vlan_proto != htons(ETH_P_8021Q))
+ if (vlan_proto != htons(ETH_P_8021Q) &&
+ (vlan_proto != htons(ETH_P_8021AD) ||
+ !(bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP)))
return -EPROTONOSUPPORT;
rc = bnxt_vf_ndo_prep(bp, vf_id);
if (rc)
return rc;
- /* TODO: needed to implement proper handling of user priority,
- * currently fail the command if there is valid priority
- */
- if (vlan_id > 4095 || qos)
+ if (vlan_id >= VLAN_N_VID || qos >= IEEE_8021Q_MAX_PRIORITIES ||
+ (!vlan_id && qos))
return -EINVAL;
vf = &bp->pf.vf[vf_id];
- vlan_tag = vlan_id;
+ vlan_tag = vlan_id | (u16)qos << VLAN_PRIO_SHIFT;
if (vlan_tag == vf->vlan)
return 0;
@@ -279,6 +277,10 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
req->fid = cpu_to_le16(vf->fw_fid);
req->dflt_vlan = cpu_to_le16(vlan_tag);
req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+ if (bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP) {
+ req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_TPID);
+ req->tpid = vlan_proto;
+ }
rc = hwrm_req_send(bp, req);
if (!rc)
vf->vlan = vlan_tag;
@@ -900,11 +902,6 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(dev);
- if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
- netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
- return 0;
- }
-
rtnl_lock();
if (!netif_running(dev)) {
netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index b9e7d3e7b15d..fdd6356f21ef 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -176,11 +176,17 @@ EXPORT_SYMBOL(bnxt_unregister_dev);
static int bnxt_set_dflt_ulp_msix(struct bnxt *bp)
{
- u32 roce_msix = BNXT_VF(bp) ?
- BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
+ int roce_msix = BNXT_MAX_ROCE_MSIX;
- return ((bp->flags & BNXT_FLAG_ROCE_CAP) ?
- min_t(u32, roce_msix, num_online_cpus()) : 0);
+ if (BNXT_VF(bp))
+ roce_msix = BNXT_MAX_ROCE_MSIX_VF;
+ else if (bp->port_partition_type)
+ roce_msix = BNXT_MAX_ROCE_MSIX_NPAR_PF;
+
+ /* NQ MSIX vectors should match the number of CPUs plus one more for
+ * the CREQ MSIX, capped at the default maximum.
+ */
+ return min_t(int, roce_msix, num_online_cpus() + 1);
}
int bnxt_send_msg(struct bnxt_en_dev *edev,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 4eafe6ec0abf..4f4914f5c84c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -15,8 +15,10 @@
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
-#define BNXT_MAX_ROCE_MSIX 9
-#define BNXT_MAX_VF_ROCE_MSIX 2
+
+#define BNXT_MAX_ROCE_MSIX_VF 2
+#define BNXT_MAX_ROCE_MSIX_NPAR_PF 5
+#define BNXT_MAX_ROCE_MSIX 64
struct hwrm_async_event_cmpl;
struct bnxt;
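The reworked bnxt_set_dflt_ulp_msix() sizes the RoCE NQ set as one vector per online CPU plus one for the CREQ, capped by a per-function limit that now depends on VF/NPAR mode. A standalone sketch of that calculation; dflt_ulp_msix() and its flag parameters are illustrative only.

#include <stdio.h>

#define BNXT_MAX_ROCE_MSIX_VF		2
#define BNXT_MAX_ROCE_MSIX_NPAR_PF	5
#define BNXT_MAX_ROCE_MSIX		64

static int dflt_ulp_msix(int is_vf, int is_npar, int ncpus)
{
	int cap = is_vf ? BNXT_MAX_ROCE_MSIX_VF :
		  is_npar ? BNXT_MAX_ROCE_MSIX_NPAR_PF : BNXT_MAX_ROCE_MSIX;
	int want = ncpus + 1;	/* one NQ per CPU plus one CREQ vector */

	return want < cap ? want : cap;
}

int main(void)
{
	printf("PF on 8 CPUs: %d vectors\n", dflt_ulp_msix(0, 0, 8));	/* 9 */
	return 0;
}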
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 345681d5007e..f88b641533fc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -297,11 +297,6 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
* redirect is coming from a frame received by the
* bnxt_en driver.
*/
- rx_buf = &rxr->rx_buf_ring[cons];
- mapping = rx_buf->mapping - bp->rx_dma_offset;
- dma_unmap_page_attrs(&pdev->dev, mapping,
- BNXT_RX_PAGE_SIZE, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
/* if we are unable to allocate a new buffer, abort and reuse */
if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index c2b4188a1ef1..a9040c42d2ff 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -31,6 +31,7 @@
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
+#include <linux/workqueue.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
@@ -3015,9 +3016,9 @@ static int cnic_service_bnx2(void *data, void *status_blk)
return cnic_service_bnx2_queues(dev);
}
-static void cnic_service_bnx2_msix(struct tasklet_struct *t)
+static void cnic_service_bnx2_msix(struct work_struct *work)
{
- struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
struct cnic_dev *dev = cp->dev;
cp->last_status_idx = cnic_service_bnx2_queues(dev);
@@ -3036,7 +3037,7 @@ static void cnic_doirq(struct cnic_dev *dev)
prefetch(cp->status_blk.gen);
prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
- tasklet_schedule(&cp->cnic_irq_task);
+ queue_work(system_bh_wq, &cp->cnic_irq_bh_work);
}
}
@@ -3140,9 +3141,9 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
return last_status;
}
-static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
+static void cnic_service_bnx2x_bh_work(struct work_struct *work)
{
- struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
struct cnic_dev *dev = cp->dev;
struct bnx2x *bp = netdev_priv(dev->netdev);
u32 status_idx, new_status_idx;
@@ -4428,7 +4429,7 @@ static void cnic_free_irq(struct cnic_dev *dev)
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
cp->disable_int_sync(dev);
- tasklet_kill(&cp->cnic_irq_task);
+ cancel_work_sync(&cp->cnic_irq_bh_work);
free_irq(ethdev->irq_arr[0].vector, dev);
}
}
@@ -4441,7 +4442,7 @@ static int cnic_request_irq(struct cnic_dev *dev)
err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
if (err)
- tasklet_disable(&cp->cnic_irq_task);
+ disable_work_sync(&cp->cnic_irq_bh_work);
return err;
}
@@ -4464,7 +4465,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
cp->last_status_idx = cp->status_blk.bnx2->status_idx;
- tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
+ INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2_msix);
err = cnic_request_irq(dev);
if (err)
return err;
@@ -4873,7 +4874,7 @@ static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
struct cnic_eth_dev *ethdev = cp->ethdev;
int err = 0;
- tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
+ INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2x_bh_work);
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
err = cnic_request_irq(dev);
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index fedc84ada937..1a314a75d2d2 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -268,7 +268,7 @@ struct cnic_local {
u32 bnx2x_igu_sb_id;
u32 int_num;
u32 last_status_idx;
- struct tasklet_struct cnic_irq_task;
+ struct work_struct cnic_irq_bh_work;
struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
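The cnic changes follow the tree-wide pattern for retiring tasklets: the handler still runs in softirq (bottom-half) context, but is driven through the system_bh_wq workqueue. A hedged kernel-style sketch of the four substitutions, with the foo names hypothetical:

#include <linux/workqueue.h>

struct foo {
	struct work_struct bh_work;	/* was: struct tasklet_struct */
};

static void foo_bh_work(struct work_struct *work)
{
	struct foo *p = from_work(p, work, bh_work);	/* was: from_tasklet() */

	/* former tasklet body; still executes in BH context */
	(void)p;
}

static void foo_setup(struct foo *p)
{
	INIT_WORK(&p->bh_work, foo_bh_work);		/* was: tasklet_setup() */
}

static void foo_kick(struct foo *p)
{
	queue_work(system_bh_wq, &p->bh_work);		/* was: tasklet_schedule() */
}

static void foo_teardown(struct foo *p)
{
	cancel_work_sync(&p->bh_work);			/* was: tasklet_kill() */
}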
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index c7e7dac057a3..f7be886570d8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -37,7 +37,7 @@
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "bcmgenet.h"
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 0ec5f01551f9..378815917741 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6145,9 +6145,7 @@ static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info
{
struct tg3 *tp = netdev_priv(dev);
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
if (tg3_flag(tp, PTP_CAPABLE)) {
info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
@@ -6157,8 +6155,6 @@ static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info
if (tp->ptp_clock)
info->phc_index = ptp_clock_index(tp->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index ea71612f6b36..5740c98d8c9f 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -13,6 +13,7 @@
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/phy/phy.h>
+#include <linux/workqueue.h>
#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP)
#define MACB_EXT_DESC
@@ -1330,7 +1331,7 @@ struct macb {
spinlock_t rx_fs_lock;
unsigned int max_tuples;
- struct tasklet_struct hresp_err_tasklet;
+ struct work_struct hresp_err_bh_work;
int rx_bd_rd_prefetch;
int tx_bd_rd_prefetch;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 11665be3a22c..56901280ba04 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -930,9 +930,6 @@ static int macb_mdiobus_register(struct macb *bp)
return ret;
}
- if (of_phy_is_fixed_link(np))
- return mdiobus_register(bp->mii_bus);
-
/* Only create the PHY from the device tree if at least one PHY is
* described. Otherwise scan the entire MDIO bus. We do this to support
* old device tree that did not follow the best practices and did not
@@ -953,8 +950,19 @@ static int macb_mdiobus_register(struct macb *bp)
static int macb_mii_init(struct macb *bp)
{
+ struct device_node *child, *np = bp->pdev->dev.of_node;
int err = -ENXIO;
+ /* With fixed-link, we don't need to register the MDIO bus,
+ * except if we have a child named "mdio" in the device tree.
+ * In that case, some devices may be attached to the MACB's MDIO bus.
+ */
+ child = of_get_child_by_name(np, "mdio");
+ if (child)
+ of_node_put(child);
+ else if (of_phy_is_fixed_link(np))
+ return macb_mii_probe(bp->dev);
+
/* Enable management port */
macb_writel(bp, NCR, MACB_BIT(MPE));
@@ -1792,9 +1800,9 @@ static int macb_tx_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static void macb_hresp_error_task(struct tasklet_struct *t)
+static void macb_hresp_error_task(struct work_struct *work)
{
- struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
+ struct macb *bp = from_work(bp, work, hresp_err_bh_work);
struct net_device *dev = bp->dev;
struct macb_queue *queue;
unsigned int q;
@@ -1994,7 +2002,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (status & MACB_BIT(HRESP)) {
- tasklet_schedule(&bp->hresp_err_tasklet);
+ queue_work(system_bh_wq, &bp->hresp_err_bh_work);
netdev_err(dev, "DMA bus error: HRESP not OK\n");
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -3410,8 +3418,6 @@ static int gem_get_ts_info(struct net_device *dev,
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -3423,7 +3429,8 @@ static int gem_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
- info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
+ if (bp->ptp_clock)
+ info->phc_index = ptp_clock_index(bp->ptp_clock);
return 0;
}
@@ -4184,6 +4191,8 @@ static int macb_init(struct platform_device *pdev)
dev->ethtool_ops = &macb_ethtool_ops;
}
+ netdev_sw_irq_coalesce_default_on(dev);
+
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
/* Set features */
@@ -5119,12 +5128,12 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_free_netdev;
}
- /* MTU range: 68 - 1500 or 10240 */
+ /* MTU range: 68 - 1518 or 10240 */
dev->min_mtu = GEM_MTU_MIN_SIZE;
if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN;
else
- dev->max_mtu = ETH_DATA_LEN;
+ dev->max_mtu = 1536 - ETH_HLEN - ETH_FCS_LEN;
if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
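For reference, the new non-jumbo bound works out as follows, which is what the updated comment advertises (the old ETH_DATA_LEN cap was 1500):

	max_mtu = 1536 - ETH_HLEN - ETH_FCS_LEN = 1536 - 14 - 4 = 1518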
@@ -5172,7 +5181,7 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_unregister_mdio;
}
- tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);
+ INIT_WORK(&bp->hresp_err_bh_work, macb_hresp_error_task);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
@@ -5216,7 +5225,7 @@ static void macb_remove(struct platform_device *pdev)
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
- tasklet_kill(&bp->hresp_err_tasklet);
+ cancel_work_sync(&bp->hresp_err_bh_work);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (!pm_runtime_suspended(&pdev->dev)) {
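The macb hunks above are the standard tasklet-to-BH-workqueue conversion: the work item replaces the tasklet in the private struct, and tasklet_setup()/tasklet_schedule()/tasklet_kill() become INIT_WORK()/queue_work(system_bh_wq, ...)/cancel_work_sync(), with from_tasklet() becoming from_work(). A condensed sketch with a hypothetical foo driver (system_bh_wq and from_work() come from <linux/workqueue.h>):

#include <linux/workqueue.h>

struct foo {
	struct work_struct err_bh_work;	/* was: struct tasklet_struct */
};

static void foo_err_work(struct work_struct *work)
{
	struct foo *foo = from_work(foo, work, err_bh_work);

	/* ... error recovery, still runs in softirq (BH) context ... */
}

/* probe:  */ INIT_WORK(&foo->err_bh_work, foo_err_work);
/* IRQ:    */ queue_work(system_bh_wq, &foo->err_bh_work);
/* remove: */ cancel_work_sync(&foo->err_bh_work);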
@@ -5250,8 +5259,8 @@ static int __maybe_unused macb_suspend(struct device *dev)
if (bp->wol & MACB_WOL_ENABLED) {
/* Check for IP address in WOL ARP mode */
idev = __in_dev_get_rcu(bp->dev);
- if (idev && idev->ifa_list)
- ifa = rcu_access_pointer(idev->ifa_list);
+ if (idev)
+ ifa = rcu_dereference(idev->ifa_list);
if ((bp->wolopts & WAKE_ARP) && !ifa) {
netdev_err(netdev, "IP address not assigned as required by WoL walk ARP\n");
return -EOPNOTSUPP;
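The suspend hunk swaps rcu_access_pointer() for rcu_dereference() because the pointer is subsequently used, not merely NULL-checked; rcu_access_pointer() is only valid for the latter. Sketch of the intended pattern, assuming the caller sits inside an RCU read-side critical section as macb_suspend() does around this code:

rcu_read_lock();
idev = __in_dev_get_rcu(bp->dev);
if (idev)
	ifa = rcu_dereference(idev->ifa_list);	/* value will be used */
/* ... use ifa only inside the read-side critical section ... */
rcu_read_unlock();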
diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
index f66d22de5168..fc4f5aee6ab3 100644
--- a/drivers/net/ethernet/cadence/macb_pci.c
+++ b/drivers/net/ethernet/cadence/macb_pci.c
@@ -19,8 +19,7 @@
#define PCI_DRIVER_NAME "macb_pci"
#define PLAT_DRIVER_NAME "macb"
-#define CDNS_VENDOR_ID 0x17cd
-#define CDNS_DEVICE_ID 0xe007
+#define PCI_DEVICE_ID_CDNS_MACB 0xe007
#define GEM_PCLK_RATE 50000000
#define GEM_HCLK_RATE 50000000
@@ -117,7 +116,7 @@ static void macb_remove(struct pci_dev *pdev)
}
static const struct pci_device_id dev_id_table[] = {
- { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), },
+ { PCI_VDEVICE(CDNS, PCI_DEVICE_ID_CDNS_MACB) },
{ 0, }
};
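PCI_VDEVICE() is shorthand that pulls the vendor ID from <linux/pci_ids.h> (presumably PCI_VENDOR_ID_CDNS here, which is why the local CDNS_VENDOR_ID define can go) and fills the subvendor/subdevice fields with PCI_ANY_ID. The two table entries are equivalent:

{ PCI_DEVICE(PCI_VENDOR_ID_CDNS, PCI_DEVICE_ID_CDNS_MACB) },	/* long form */
{ PCI_VDEVICE(CDNS, PCI_DEVICE_ID_CDNS_MACB) },			/* shorthand */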
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
index 2d06097d3f61..40f529d0bc4c 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
@@ -43,6 +43,4 @@ int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct);
int cn23xx_setup_octeon_vf_device(struct octeon_device *oct);
u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
-
-void cn23xx_dump_vf_initialized_regs(struct octeon_device *oct);
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
index 8ed57134ee0c..129c8b84f549 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -86,7 +86,6 @@ u32
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq);
void lio_cn6xxx_enable_interrupt(struct octeon_device *oct, u8 unused);
void lio_cn6xxx_disable_interrupt(struct octeon_device *oct, u8 unused);
-void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip,
struct octeon_reg_list *reg_list);
u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 5835965dbc32..c849e2c871a9 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -2496,37 +2496,31 @@ ret_intrmod:
return ret;
}
+#ifdef PTP_HARDWARE_TIMESTAMPING
static int lio_get_ts_info(struct net_device *netdev,
struct kernel_ethtool_ts_info *info)
{
struct lio *lio = GET_LIO(netdev);
info->so_timestamping =
-#ifdef PTP_HARDWARE_TIMESTAMPING
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
-#endif
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
if (lio->ptp_clock)
info->phc_index = ptp_clock_index(lio->ptp_clock);
- else
- info->phc_index = -1;
-#ifdef PTP_HARDWARE_TIMESTAMPING
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
-#endif
return 0;
}
+#endif
/* Return register dump len. */
static int lio_get_regs_len(struct net_device *dev)
@@ -3146,7 +3140,9 @@ static const struct ethtool_ops lio_ethtool_ops = {
.set_coalesce = lio_set_intr_coalesce,
.get_priv_flags = lio_get_priv_flags,
.set_priv_flags = lio_set_priv_flags,
+#ifdef PTP_HARDWARE_TIMESTAMPING
.get_ts_info = lio_get_ts_info,
+#endif
};
static const struct ethtool_ops lio_vf_ethtool_ops = {
@@ -3169,7 +3165,9 @@ static const struct ethtool_ops lio_vf_ethtool_ops = {
.set_coalesce = lio_set_intr_coalesce,
.get_priv_flags = lio_get_priv_flags,
.set_priv_flags = lio_set_priv_flags,
+#ifdef PTP_HARDWARE_TIMESTAMPING
.get_ts_info = lio_get_ts_info,
+#endif
};
void liquidio_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index fb380b4f3e02..d26364c2ac81 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -804,13 +804,6 @@ int octeon_init_consoles(struct octeon_device *oct);
int octeon_add_console(struct octeon_device *oct, u32 console_num,
char *dbg_enb);
-/** write or read from a console */
-int octeon_console_write(struct octeon_device *oct, u32 console_num,
- char *buffer, u32 write_request_size, u32 flags);
-int octeon_console_write_avail(struct octeon_device *oct, u32 console_num);
-
-int octeon_console_read_avail(struct octeon_device *oct, u32 console_num);
-
/** Removes all attached consoles. */
void octeon_remove_consoles(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index c9b19e624dce..232ae72c0e37 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -395,8 +395,6 @@ int octeon_register_dispatch_fn(struct octeon_device *oct,
void *octeon_get_dispatch_arg(struct octeon_device *oct,
u16 opcode, u16 subcode);
-void octeon_droq_print_stats(void);
-
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq);
int octeon_create_droq(struct octeon_device *oct, u32 q_no,
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index bebf3bd349c6..a04f36a0e1a0 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -378,9 +378,6 @@ int octeon_send_command(struct octeon_device *oct, u32 iq_no,
u32 force_db, void *cmd, void *buf,
u32 datasize, u32 reqtype);
-void octeon_dump_soft_command(struct octeon_device *oct,
- struct octeon_soft_command *sc);
-
void octeon_prepare_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc,
u8 opcode, u8 subcode,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 6a04d2530176..d0ff0c170b1a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -844,8 +844,6 @@ static int nicvf_get_ts_info(struct net_device *netdev,
return ethtool_op_get_ts_info(netdev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 8453defc296c..b7531041c56d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -359,8 +359,6 @@ int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
-void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
-u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic,
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index a40c266c37f2..608cc6af5af1 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1054,18 +1054,12 @@ static int phy_interface_mode(u8 lmac_type)
static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
- struct lmac *lmac, **priv;
+ struct lmac *lmac;
u64 cfg;
lmac = &bgx->lmac[lmacid];
lmac->bgx = bgx;
- lmac->netdev = alloc_netdev_dummy(sizeof(struct lmac *));
- if (!lmac->netdev)
- return -ENOMEM;
- priv = netdev_priv(lmac->netdev);
- *priv = lmac;
-
if ((lmac->lmac_type == BGX_MODE_SGMII) ||
(lmac->lmac_type == BGX_MODE_QSGMII) ||
(lmac->lmac_type == BGX_MODE_RGMII)) {
@@ -1191,7 +1185,6 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
(lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
phy_disconnect(lmac->phydev);
- free_netdev(lmac->netdev);
lmac->phydev = NULL;
}
@@ -1653,6 +1646,23 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bgx_get_qlm_mode(bgx);
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+ struct lmac *lmacp, **priv;
+
+ lmacp = &bgx->lmac[lmac];
+ lmacp->netdev = alloc_netdev_dummy(sizeof(struct lmac *));
+
+ if (!lmacp->netdev) {
+ for (int i = 0; i < lmac; i++)
+ free_netdev(bgx->lmac[i].netdev);
+ err = -ENOMEM;
+ goto err_enable;
+ }
+
+ priv = netdev_priv(lmacp->netdev);
+ *priv = lmacp;
+ }
+
err = bgx_init_phy(bgx);
if (err)
goto err_enable;
@@ -1692,8 +1702,10 @@ static void bgx_remove(struct pci_dev *pdev)
u8 lmac;
/* Disable all LMACs */
- for (lmac = 0; lmac < bgx->lmac_count; lmac++)
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
bgx_lmac_disable(bgx, lmac);
+ free_netdev(bgx->lmac[lmac].netdev);
+ }
pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
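Moving the dummy netdev allocation out of bgx_lmac_enable() and into bgx_probe() means a mid-loop failure must unwind the earlier allocations itself, as the hunk above does. The idiom, reduced to its core:

for (i = 0; i < count; i++) {
	lmac[i].netdev = alloc_netdev_dummy(sizeof(struct lmac *));
	if (!lmac[i].netdev) {
		while (i--)		/* free what was allocated so far */
			free_netdev(lmac[i].netdev);
		return -ENOMEM;
	}
}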
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index cdea49392185..84f16ababaee 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -219,9 +219,7 @@
void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid, u64 mac, u8 vf);
void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf);
void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode);
-void octeon_mdiobus_force_mod_depencency(void);
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
-void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
unsigned bgx_get_map(int node);
int bgx_get_lmac_count(int node, int bgx);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index e56eff701395..304bb282ab03 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -329,8 +329,6 @@ irqreturn_t t1_slow_intr_handler(adapter_t *adapter);
int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct board_info *t1_get_board_info(unsigned int board_id);
-const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
- unsigned short ssid);
int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
struct adapter_params *p);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 7d7d3e0098df..3b7068832f95 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -1034,7 +1034,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_RXCSUM;
netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_RXCSUM | NETIF_F_LLTX | NETIF_F_HIGHDMA;
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+ netdev->lltx = true;
if (vlan_tso_capable(adapter)) {
netdev->features |=
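This is part of the tree-wide retirement of NETIF_F_LLTX as a features bit in favour of a plain net_device field (the dpaa and dpaa2-eth hunks further down make the same change). The conversion is mechanical:

netdev->features |= NETIF_F_SG | NETIF_F_HIGHDMA;	/* NETIF_F_LLTX dropped */
netdev->lltx = true;	/* driver handles its own xmit locking */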
diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.h b/drivers/net/ethernet/chelsio/cxgb/tp.h
index ba15675d56df..64f93dcc676b 100644
--- a/drivers/net/ethernet/chelsio/cxgb/tp.h
+++ b/drivers/net/ethernet/chelsio/cxgb/tp.h
@@ -65,9 +65,7 @@ void t1_tp_intr_enable(struct petp *tp);
void t1_tp_intr_clear(struct petp *tp);
int t1_tp_intr_handler(struct petp *tp);
-void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps);
void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable);
void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable);
-int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size);
int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk);
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
index f04e81f33795..a08fc762a438 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
@@ -106,6 +106,4 @@ static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
return &e->t3c_tid;
}
-int attach_t3cdev(struct t3cdev *dev);
-void detach_t3cdev(struct t3cdev *dev);
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index fca9533bc011..bbf7641a0fc7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1958,11 +1958,6 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate);
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
-void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
- const u8 *addr);
-int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
- u64 mask0, u64 mask1, unsigned int crc, bool enable);
-
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
index 80c6627fe981..c80a93347a8c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
@@ -122,7 +122,6 @@ void cxgb4_dcb_version_init(struct net_device *);
void cxgb4_dcb_reset(struct net_device *dev);
void cxgb4_dcb_state_fsm(struct net_device *, enum cxgb4_dcb_state_input);
void cxgb4_dcb_handle_fw_update(struct adapter *, const struct fw_port_cmd *);
-void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *);
extern const struct dcbnl_rtnl_ops cxgb4_dcb_ops;
static inline __u8 bitswap_1(unsigned char val)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 3d091947ae00..7f3f5afa864f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -1556,12 +1556,9 @@ static int get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *ts
struct adapter *adapter = pi->adapter;
ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
-
- ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -1575,8 +1572,6 @@ static int get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *ts
if (adapter->ptp_clock)
ts_info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- ts_info->phc_index = -1;
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
index 33b2c0c45509..f6f745f5c022 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
@@ -81,8 +81,7 @@ int cxgb_fcoe_enable(struct net_device *netdev)
netdev->features |= NETIF_F_FCOE_CRC;
netdev->vlan_features |= NETIF_F_FCOE_CRC;
- netdev->features |= NETIF_F_FCOE_MTU;
- netdev->vlan_features |= NETIF_F_FCOE_MTU;
+ netdev->fcoe_mtu = true;
netdev_features_change(netdev);
@@ -112,8 +111,7 @@ int cxgb_fcoe_disable(struct net_device *netdev)
netdev->features &= ~NETIF_F_FCOE_CRC;
netdev->vlan_features &= ~NETIF_F_FCOE_CRC;
- netdev->features &= ~NETIF_F_FCOE_MTU;
- netdev->vlan_features &= ~NETIF_F_FCOE_MTU;
+ netdev->fcoe_mtu = false;
netdev_features_change(netdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 786ceae34488..dd9e68465e69 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -1244,7 +1244,8 @@ static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
* in the Compressed Filter Tuple.
*/
if (tp->vlan_shift >= 0 && fs->mask.ivlan)
- ntuple |= (FT_VLAN_VLD_F | fs->val.ivlan) << tp->vlan_shift;
+ ntuple |= (u64)(FT_VLAN_VLD_F |
+ fs->val.ivlan) << tp->vlan_shift;
if (tp->port_shift >= 0 && fs->mask.iport)
ntuple |= (u64)fs->val.iport << tp->port_shift;
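The (u64) cast fixes a classic C promotion bug: FT_VLAN_VLD_F | fs->val.ivlan is computed in 32-bit arithmetic, so shifting it left discards high bits (and is outright undefined for shift counts of 32 or more) before the result is widened into the 64-bit ntuple. Illustration:

u32 v = FT_VLAN_VLD_F | fs->val.ivlan;	/* 32-bit value */
ntuple |= v << shift;			/* bug: shifted as 32-bit, widened after */
ntuple |= (u64)v << shift;		/* fix: widened first, then shifted */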
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
index 9050568a034c..64663112cad8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
@@ -242,7 +242,7 @@ struct cxgb4_next_header {
* field's value to jump to next header such as IHL field
* in IPv4 header.
*/
- struct tc_u32_sel sel;
+ struct tc_u32_sel_hdr sel;
struct tc_u32_key key;
/* location of jump to make */
const struct cxgb4_match_field *jump;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index a9599ba26975..d8cafaa7ddb4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -508,7 +508,6 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
unsigned int cxgb4_port_chan(const struct net_device *dev);
unsigned int cxgb4_port_e2cchan(const struct net_device *dev);
unsigned int cxgb4_port_viid(const struct net_device *dev);
-unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid);
unsigned int cxgb4_port_idx(const struct net_device *dev);
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
unsigned int *idx);
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 854d87e1125c..2e3973a32d9d 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -342,10 +342,10 @@ int cxgbi_ppm_release(struct cxgbi_ppm *ppm)
}
EXPORT_SYMBOL(cxgbi_ppm_release);
-static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
- unsigned int *pcpu_ppmax)
+static struct cxgbi_ppm_pool __percpu *
+ppm_alloc_cpu_pool(unsigned int *total, unsigned int *pcpu_ppmax)
{
- struct cxgbi_ppm_pool *pools;
+ struct cxgbi_ppm_pool __percpu *pools;
unsigned int ppmax = (*total) / num_possible_cpus();
unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;
unsigned int bmap;
@@ -392,7 +392,7 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
unsigned int iscsi_edram_size)
{
struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
- struct cxgbi_ppm_pool *pool = NULL;
+ struct cxgbi_ppm_pool __percpu *pool = NULL;
unsigned int pool_index_max = 0;
unsigned int ppmax_pool = 0;
unsigned int ppod_bmap_size;
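The __percpu annotation added above is a sparse address-space marker, not a behavioural change: it documents that the pointer handed back by the per-CPU allocator must only be dereferenced through the per-CPU accessors. Minimal sketch:

#include <linux/percpu.h>

struct pool { unsigned int used; };

struct pool __percpu *pools = alloc_percpu(struct pool);

per_cpu_ptr(pools, cpu)->used++;	/* ok: through the accessor */
/* pools->used++; */			/* sparse would flag this */
free_percpu(pools);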
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 1f495cfd7959..c2007cd86416 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -16,13 +16,12 @@
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/platform_data/eth-ep93xx.h>
-
#define DRV_MODULE_NAME "ep93xx-eth"
#define RX_QUEUE_ENTRIES 64
@@ -738,25 +737,6 @@ static const struct net_device_ops ep93xx_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
-{
- struct net_device *dev;
-
- dev = alloc_etherdev(sizeof(struct ep93xx_priv));
- if (dev == NULL)
- return NULL;
-
- eth_hw_addr_set(dev, data->dev_addr);
-
- dev->ethtool_ops = &ep93xx_ethtool_ops;
- dev->netdev_ops = &ep93xx_netdev_ops;
-
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
-
- return dev;
-}
-
-
static void ep93xx_eth_remove(struct platform_device *pdev)
{
struct net_device *dev;
@@ -786,27 +766,49 @@ static void ep93xx_eth_remove(struct platform_device *pdev)
static int ep93xx_eth_probe(struct platform_device *pdev)
{
- struct ep93xx_eth_data *data;
struct net_device *dev;
struct ep93xx_priv *ep;
struct resource *mem;
+ void __iomem *base_addr;
+ struct device_node *np;
+ u8 addr[ETH_ALEN];
+ u32 phy_id;
int irq;
int err;
if (pdev == NULL)
return -ENODEV;
- data = dev_get_platdata(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!mem || irq < 0)
return -ENXIO;
- dev = ep93xx_dev_alloc(data);
+ base_addr = ioremap(mem->start, resource_size(mem));
+ if (!base_addr)
+ return dev_err_probe(&pdev->dev, -EIO, "Failed to ioremap ethernet registers\n");
+
+ np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!np)
+ return dev_err_probe(&pdev->dev, -ENODEV, "Please provide \"phy-handle\"\n");
+
+ err = of_property_read_u32(np, "reg", &phy_id);
+ of_node_put(np);
+ if (err)
+ return dev_err_probe(&pdev->dev, -ENOENT, "Failed to locate \"phy_id\"\n");
+
+ dev = alloc_etherdev(sizeof(struct ep93xx_priv));
if (dev == NULL) {
err = -ENOMEM;
goto err_out;
}
+
+ memcpy_fromio(addr, base_addr + 0x50, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
+ dev->ethtool_ops = &ep93xx_ethtool_ops;
+ dev->netdev_ops = &ep93xx_netdev_ops;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+
ep = netdev_priv(dev);
ep->dev = dev;
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -822,15 +824,10 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
goto err_out;
}
- ep->base_addr = ioremap(mem->start, resource_size(mem));
- if (ep->base_addr == NULL) {
- dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
- err = -EIO;
- goto err_out;
- }
+ ep->base_addr = base_addr;
ep->irq = irq;
- ep->mii.phy_id = data->phy_id;
+ ep->mii.phy_id = phy_id;
ep->mii.phy_id_mask = 0x1f;
ep->mii.reg_num_mask = 0x1f;
ep->mii.dev = dev;
@@ -857,12 +854,18 @@ err_out:
return err;
}
+static const struct of_device_id ep93xx_eth_of_ids[] = {
+ { .compatible = "cirrus,ep9301-eth" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ep93xx_eth_of_ids);
static struct platform_driver ep93xx_eth_driver = {
.probe = ep93xx_eth_probe,
.remove_new = ep93xx_eth_remove,
.driver = {
.name = "ep93xx-eth",
+ .of_match_table = ep93xx_eth_of_ids,
},
};
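The probe conversion replaces platform data with two device-tree lookups: resolve the "phy-handle" phandle to the PHY node, then read that node's "reg" property to get the MII address. Note that the reference taken by of_parse_phandle() has to be dropped on every path:

np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (!np)
	return -ENODEV;
err = of_property_read_u32(np, "reg", &phy_id);
of_node_put(np);		/* release the node reference either way */
if (err)
	return -ENOENT;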
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 300ad05ee05b..0cc3644ee855 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -128,6 +128,40 @@ struct vxlan_offload {
u8 flags;
};
+struct enic_wq_stats {
+ u64 packets; /* pkts queued for Tx */
+ u64 stopped; /* Tx ring almost full, queue stopped */
+ u64 wake; /* Tx ring no longer full, queue woken up */
+ u64 tso; /* non-encap tso pkt */
+ u64 encap_tso; /* encap tso pkt */
+ u64 encap_csum; /* encap HW csum */
+ u64 csum_partial; /* skb->ip_summed = CHECKSUM_PARTIAL */
+ u64 csum_none; /* HW csum not required */
+ u64 bytes; /* bytes queued for Tx */
+ u64 add_vlan; /* HW adds vlan tag */
+ u64 cq_work; /* Tx completions processed */
+ u64 cq_bytes; /* Tx bytes processed */
+ u64 null_pkt; /* skb length <= 0 */
+ u64 skb_linear_fail; /* linearize failures */
+ u64 desc_full_awake; /* TX ring full while queue awake */
+};
+
+struct enic_rq_stats {
+ u64 packets; /* pkts received */
+ u64 bytes; /* bytes received */
+ u64 l4_rss_hash; /* hashed on l4 */
+ u64 l3_rss_hash; /* hashed on l3 */
+ u64 csum_unnecessary; /* HW verified csum */
+ u64 csum_unnecessary_encap; /* HW verified csum on encap packet */
+ u64 vlan_stripped; /* HW stripped vlan */
+ u64 napi_complete; /* napi complete intr reenabled */
+ u64 napi_repoll; /* napi poll again */
+ u64 bad_fcs; /* bad pkts */
+ u64 pkt_truncated; /* truncated pkts */
+ u64 no_skb; /* out of skbs */
+ u64 desc_skip; /* Rx pkt went into later buffer */
+};
+
/* Per-instance private data structure */
struct enic {
struct net_device *netdev;
@@ -162,16 +196,16 @@ struct enic {
/* work queue cache line section */
____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
spinlock_t wq_lock[ENIC_WQ_MAX];
+ struct enic_wq_stats wq_stats[ENIC_WQ_MAX];
unsigned int wq_count;
u16 loop_enable;
u16 loop_tag;
/* receive queue cache line section */
____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
+ struct enic_rq_stats rq_stats[ENIC_RQ_MAX];
unsigned int rq_count;
struct vxlan_offload vxlan;
- u64 rq_truncated_pkts;
- u64 rq_bad_fcs;
struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];
/* interrupt resource cache line section */
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index f2f1055880b2..f7986f2b6a17 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -32,6 +32,41 @@ struct enic_stat {
.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
}
+#define ENIC_PER_RQ_STAT(stat) { \
+ .name = "rq[%d]_"#stat, \
+ .index = offsetof(struct enic_rq_stats, stat) / sizeof(u64) \
+}
+
+#define ENIC_PER_WQ_STAT(stat) { \
+ .name = "wq[%d]_"#stat, \
+ .index = offsetof(struct enic_wq_stats, stat) / sizeof(u64) \
+}
+
+static const struct enic_stat enic_per_rq_stats[] = {
+ ENIC_PER_RQ_STAT(l4_rss_hash),
+ ENIC_PER_RQ_STAT(l3_rss_hash),
+ ENIC_PER_RQ_STAT(csum_unnecessary_encap),
+ ENIC_PER_RQ_STAT(vlan_stripped),
+ ENIC_PER_RQ_STAT(napi_complete),
+ ENIC_PER_RQ_STAT(napi_repoll),
+ ENIC_PER_RQ_STAT(no_skb),
+ ENIC_PER_RQ_STAT(desc_skip),
+};
+
+#define NUM_ENIC_PER_RQ_STATS ARRAY_SIZE(enic_per_rq_stats)
+
+static const struct enic_stat enic_per_wq_stats[] = {
+ ENIC_PER_WQ_STAT(encap_tso),
+ ENIC_PER_WQ_STAT(encap_csum),
+ ENIC_PER_WQ_STAT(add_vlan),
+ ENIC_PER_WQ_STAT(cq_work),
+ ENIC_PER_WQ_STAT(cq_bytes),
+ ENIC_PER_WQ_STAT(null_pkt),
+ ENIC_PER_WQ_STAT(skb_linear_fail),
+ ENIC_PER_WQ_STAT(desc_full_awake),
+};
+
+#define NUM_ENIC_PER_WQ_STATS ARRAY_SIZE(enic_per_wq_stats)
static const struct enic_stat enic_tx_stats[] = {
ENIC_TX_STAT(tx_frames_ok),
ENIC_TX_STAT(tx_unicast_frames_ok),
@@ -46,6 +81,8 @@ static const struct enic_stat enic_tx_stats[] = {
ENIC_TX_STAT(tx_tso),
};
+#define NUM_ENIC_TX_STATS ARRAY_SIZE(enic_tx_stats)
+
static const struct enic_stat enic_rx_stats[] = {
ENIC_RX_STAT(rx_frames_ok),
ENIC_RX_STAT(rx_frames_total),
@@ -70,13 +107,13 @@ static const struct enic_stat enic_rx_stats[] = {
ENIC_RX_STAT(rx_frames_to_max),
};
+#define NUM_ENIC_RX_STATS ARRAY_SIZE(enic_rx_stats)
+
static const struct enic_stat enic_gen_stats[] = {
ENIC_GEN_STAT(dma_map_error),
};
-static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
-static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
-static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
+#define NUM_ENIC_GEN_STATS ARRAY_SIZE(enic_gen_stats)
static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
{
@@ -141,22 +178,38 @@ static void enic_get_drvinfo(struct net_device *netdev,
static void enic_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
+ struct enic *enic = netdev_priv(netdev);
unsigned int i;
+ unsigned int j;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < enic_n_tx_stats; i++) {
+ for (i = 0; i < NUM_ENIC_TX_STATS; i++) {
memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
- for (i = 0; i < enic_n_rx_stats; i++) {
+ for (i = 0; i < NUM_ENIC_RX_STATS; i++) {
memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
- for (i = 0; i < enic_n_gen_stats; i++) {
+ for (i = 0; i < NUM_ENIC_GEN_STATS; i++) {
memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
+ for (i = 0; i < enic->rq_count; i++) {
+ for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ enic_per_rq_stats[j].name, i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ for (i = 0; i < enic->wq_count; i++) {
+ for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ enic_per_wq_stats[j].name, i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
break;
}
}
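The per-queue stat names work because each .name in the ENIC_PER_RQ_STAT()/ENIC_PER_WQ_STAT() tables is itself a printf format ("rq[%d]_..."), so one table serves every queue index when expanded in enic_get_strings(). For example, for queue 2:

snprintf(data, ETH_GSTRING_LEN, "rq[%d]_l4_rss_hash", 2);
/* -> "rq[2]_l4_rss_hash", as seen in ethtool -S output */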
@@ -242,9 +295,19 @@ err_out:
static int enic_get_sset_count(struct net_device *netdev, int sset)
{
+ struct enic *enic = netdev_priv(netdev);
+ unsigned int n_per_rq_stats;
+ unsigned int n_per_wq_stats;
+ unsigned int n_stats;
+
switch (sset) {
case ETH_SS_STATS:
- return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
+ n_per_rq_stats = NUM_ENIC_PER_RQ_STATS * enic->rq_count;
+ n_per_wq_stats = NUM_ENIC_PER_WQ_STATS * enic->wq_count;
+ n_stats = NUM_ENIC_TX_STATS + NUM_ENIC_RX_STATS +
+ NUM_ENIC_GEN_STATS +
+ n_per_rq_stats + n_per_wq_stats;
+ return n_stats;
default:
return -EOPNOTSUPP;
}
@@ -256,6 +319,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *vstats;
unsigned int i;
+ unsigned int j;
int err;
err = enic_dev_stats_dump(enic, &vstats);
@@ -266,12 +330,30 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
if (err == -ENOMEM)
return;
- for (i = 0; i < enic_n_tx_stats; i++)
+ for (i = 0; i < NUM_ENIC_TX_STATS; i++)
*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
- for (i = 0; i < enic_n_rx_stats; i++)
+ for (i = 0; i < NUM_ENIC_RX_STATS; i++)
*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
- for (i = 0; i < enic_n_gen_stats; i++)
+ for (i = 0; i < NUM_ENIC_GEN_STATS; i++)
*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
+ for (i = 0; i < enic->rq_count; i++) {
+ struct enic_rq_stats *rqstats = &enic->rq_stats[i];
+ int index;
+
+ for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
+ index = enic_per_rq_stats[j].index;
+ *(data++) = ((u64 *)rqstats)[index];
+ }
+ }
+ for (i = 0; i < enic->wq_count; i++) {
+ struct enic_wq_stats *wqstats = &enic->wq_stats[i];
+ int index;
+
+ for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
+ index = enic_per_wq_stats[j].index;
+ *(data++) = ((u64 *)wqstats)[index];
+ }
+ }
}
static u32 enic_get_msglevel(struct net_device *netdev)
@@ -601,9 +683,7 @@ static int enic_set_rxfh(struct net_device *netdev,
static int enic_get_ts_info(struct net_device *netdev,
struct kernel_ethtool_ts_info *info)
{
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 5f26fc3ad655..ffed14b63d41 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -46,6 +46,7 @@
#include <linux/crash_dump.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
+#include <net/netdev_queues.h>
#include "cq_enet_desc.h"
#include "vnic_dev.h"
@@ -339,6 +340,10 @@ static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
static void enic_wq_free_buf(struct vnic_wq *wq,
struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ enic->wq_stats[wq->index].cq_work++;
+ enic->wq_stats[wq->index].cq_bytes += buf->len;
enic_free_wq_buf(wq, buf);
}
@@ -355,8 +360,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
vnic_wq_desc_avail(&enic->wq[q_number]) >=
- (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
+ (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
netif_wake_subqueue(enic->netdev, q_number);
+ enic->wq_stats[q_number].wake++;
+ }
spin_unlock(&enic->wq_lock[q_number]);
@@ -590,6 +597,11 @@ static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+ /* The enic_queue_wq_desc() above does not do HW checksum */
+ enic->wq_stats[wq->index].csum_none++;
+ enic->wq_stats[wq->index].packets++;
+ enic->wq_stats[wq->index].bytes += skb->len;
+
return err;
}
@@ -622,6 +634,10 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+ enic->wq_stats[wq->index].csum_partial++;
+ enic->wq_stats[wq->index].packets++;
+ enic->wq_stats[wq->index].bytes += skb->len;
+
return err;
}
@@ -676,15 +692,18 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
unsigned int offset = 0;
unsigned int hdr_len;
dma_addr_t dma_addr;
+ unsigned int pkts;
unsigned int len;
skb_frag_t *frag;
if (skb->encapsulation) {
hdr_len = skb_inner_tcp_all_headers(skb);
enic_preload_tcp_csum_encap(skb);
+ enic->wq_stats[wq->index].encap_tso++;
} else {
hdr_len = skb_tcp_all_headers(skb);
enic_preload_tcp_csum(skb);
+ enic->wq_stats[wq->index].tso++;
}
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
@@ -705,7 +724,7 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
}
if (eop)
- return 0;
+ goto tso_out_stats;
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
* for additional data fragments
@@ -732,6 +751,15 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
}
}
+tso_out_stats:
+ /* calculate how many packets TSO sent */
+ len = skb->len - hdr_len;
+ pkts = len / mss;
+ if ((len % mss) > 0)
+ pkts++;
+ enic->wq_stats[wq->index].packets += pkts;
+ enic->wq_stats[wq->index].bytes += (len + (pkts * hdr_len));
+
return 0;
}
@@ -764,6 +792,10 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+ enic->wq_stats[wq->index].encap_csum++;
+ enic->wq_stats[wq->index].packets++;
+ enic->wq_stats[wq->index].bytes += skb->len;
+
return err;
}
@@ -780,6 +812,7 @@ static inline int enic_queue_wq_skb(struct enic *enic,
/* VLAN tag from trunking driver */
vlan_tag_insert = 1;
vlan_tag = skb_vlan_tag_get(skb);
+ enic->wq_stats[wq->index].add_vlan++;
} else if (enic->loop_enable) {
vlan_tag = enic->loop_tag;
loopback = 1;
@@ -792,7 +825,7 @@ static inline int enic_queue_wq_skb(struct enic *enic,
else if (skb->encapsulation)
err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert,
vlan_tag, loopback);
- else if (skb->ip_summed == CHECKSUM_PARTIAL)
+ else if (skb->ip_summed == CHECKSUM_PARTIAL)
err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
vlan_tag, loopback);
else
@@ -825,13 +858,15 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
unsigned int txq_map;
struct netdev_queue *txq;
+ txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
+ wq = &enic->wq[txq_map];
+
if (skb->len <= 0) {
dev_kfree_skb_any(skb);
+ enic->wq_stats[wq->index].null_pkt++;
return NETDEV_TX_OK;
}
- txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
- wq = &enic->wq[txq_map];
txq = netdev_get_tx_queue(netdev, txq_map);
/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
@@ -843,6 +878,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
skb_linearize(skb)) {
dev_kfree_skb_any(skb);
+ enic->wq_stats[wq->index].skb_linear_fail++;
return NETDEV_TX_OK;
}
@@ -854,14 +890,17 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
/* This is a hard error, log it */
netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
spin_unlock(&enic->wq_lock[txq_map]);
+ enic->wq_stats[wq->index].desc_full_awake++;
return NETDEV_TX_BUSY;
}
if (enic_queue_wq_skb(enic, wq, skb))
goto error;
- if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
+ if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) {
netif_tx_stop_queue(txq);
+ enic->wq_stats[wq->index].stopped++;
+ }
skb_tx_timestamp(skb);
if (!netdev_xmit_more() || netif_xmit_stopped(txq))
vnic_wq_doorbell(wq);
@@ -878,7 +917,10 @@ static void enic_get_stats(struct net_device *netdev,
{
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *stats;
+ u64 pkt_truncated = 0;
+ u64 bad_fcs = 0;
int err;
+ int i;
err = enic_dev_stats_dump(enic, &stats);
/* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
@@ -897,8 +939,17 @@ static void enic_get_stats(struct net_device *netdev,
net_stats->rx_bytes = stats->rx.rx_bytes_ok;
net_stats->rx_errors = stats->rx.rx_errors;
net_stats->multicast = stats->rx.rx_multicast_frames_ok;
- net_stats->rx_over_errors = enic->rq_truncated_pkts;
- net_stats->rx_crc_errors = enic->rq_bad_fcs;
+
+ for (i = 0; i < ENIC_RQ_MAX; i++) {
+ struct enic_rq_stats *rqs = &enic->rq_stats[i];
+
+ if (!enic->rq->ctrl)
+ break;
+ pkt_truncated += rqs->pkt_truncated;
+ bad_fcs += rqs->bad_fcs;
+ }
+ net_stats->rx_over_errors = pkt_truncated;
+ net_stats->rx_crc_errors = bad_fcs;
net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;
}
@@ -1261,8 +1312,10 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
return 0;
}
skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!skb)
+ if (!skb) {
+ enic->rq_stats[rq->index].no_skb++;
return -ENOMEM;
+ }
dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
DMA_FROM_DEVICE);
@@ -1313,6 +1366,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
struct net_device *netdev = enic->netdev;
struct sk_buff *skb;
struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ struct enic_rq_stats *rqstats = &enic->rq_stats[rq->index];
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -1323,8 +1377,11 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
u32 rss_hash;
bool outer_csum_ok = true, encap = false;
- if (skipped)
+ rqstats->packets++;
+ if (skipped) {
+ rqstats->desc_skip++;
return;
+ }
skb = buf->os_buf;
@@ -1342,9 +1399,9 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
if (!fcs_ok) {
if (bytes_written > 0)
- enic->rq_bad_fcs++;
+ rqstats->bad_fcs++;
else if (bytes_written == 0)
- enic->rq_truncated_pkts++;
+ rqstats->pkt_truncated++;
}
dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
@@ -1359,7 +1416,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
/* Good receive
*/
-
+ rqstats->bytes += bytes_written;
if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
buf->os_buf = NULL;
dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
@@ -1377,11 +1434,13 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
+ rqstats->l4_rss_hash++;
break;
case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
+ rqstats->l3_rss_hash++;
break;
}
}
@@ -1418,11 +1477,16 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
(ipv4_csum_ok || ipv6)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = encap;
+ if (encap)
+ rqstats->csum_unnecessary_encap++;
+ else
+ rqstats->csum_unnecessary++;
}
- if (vlan_stripped)
+ if (vlan_stripped) {
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
-
+ rqstats->vlan_stripped++;
+ }
skb_mark_napi_id(skb, &enic->napi[rq->index]);
if (!(netdev->features & NETIF_F_GRO))
netif_receive_skb(skb);
@@ -1435,7 +1499,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
/* Buffer overflow
*/
-
+ rqstats->pkt_truncated++;
dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
@@ -1568,6 +1632,9 @@ static int enic_poll(struct napi_struct *napi, int budget)
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
enic_set_int_moderation(enic, &enic->rq[0]);
vnic_intr_unmask(&enic->intr[intr]);
+ enic->rq_stats[0].napi_complete++;
+ } else {
+ enic->rq_stats[0].napi_repoll++;
}
return rq_work_done;
@@ -1693,6 +1760,9 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
enic_set_int_moderation(enic, &enic->rq[rq]);
vnic_intr_unmask(&enic->intr[intr]);
+ enic->rq_stats[rq].napi_complete++;
+ } else {
+ enic->rq_stats[rq].napi_repoll++;
}
return work_done;
@@ -2502,6 +2572,54 @@ static void enic_clear_intr_mode(struct enic *enic)
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
+static void enic_get_queue_stats_rx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_rx *rxs)
+{
+ struct enic *enic = netdev_priv(dev);
+ struct enic_rq_stats *rqstats = &enic->rq_stats[idx];
+
+ rxs->bytes = rqstats->bytes;
+ rxs->packets = rqstats->packets;
+ rxs->hw_drops = rqstats->bad_fcs + rqstats->pkt_truncated;
+ rxs->hw_drop_overruns = rqstats->pkt_truncated;
+ rxs->csum_unnecessary = rqstats->csum_unnecessary +
+ rqstats->csum_unnecessary_encap;
+}
+
+static void enic_get_queue_stats_tx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_tx *txs)
+{
+ struct enic *enic = netdev_priv(dev);
+ struct enic_wq_stats *wqstats = &enic->wq_stats[idx];
+
+ txs->bytes = wqstats->bytes;
+ txs->packets = wqstats->packets;
+ txs->csum_none = wqstats->csum_none;
+ txs->needs_csum = wqstats->csum_partial + wqstats->encap_csum +
+ wqstats->tso;
+ txs->hw_gso_packets = wqstats->tso;
+ txs->stop = wqstats->stopped;
+ txs->wake = wqstats->wake;
+}
+
+static void enic_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rxs,
+ struct netdev_queue_stats_tx *txs)
+{
+ rxs->bytes = 0;
+ rxs->packets = 0;
+ rxs->hw_drops = 0;
+ rxs->hw_drop_overruns = 0;
+ rxs->csum_unnecessary = 0;
+ txs->bytes = 0;
+ txs->packets = 0;
+ txs->csum_none = 0;
+ txs->needs_csum = 0;
+ txs->hw_gso_packets = 0;
+ txs->stop = 0;
+ txs->wake = 0;
+}
+
static const struct net_device_ops enic_netdev_dynamic_ops = {
.ndo_open = enic_open,
.ndo_stop = enic_stop,
@@ -2550,6 +2668,12 @@ static const struct net_device_ops enic_netdev_ops = {
.ndo_features_check = enic_features_check,
};
+static const struct netdev_stat_ops enic_netdev_stat_ops = {
+ .get_queue_stats_rx = enic_get_queue_stats_rx,
+ .get_queue_stats_tx = enic_get_queue_stats_tx,
+ .get_base_stats = enic_get_base_stats,
+};
+
static void enic_dev_deinit(struct enic *enic)
{
unsigned int i;
@@ -2892,6 +3016,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &enic_netdev_dynamic_ops;
else
netdev->netdev_ops = &enic_netdev_ops;
+ netdev->stat_ops = &enic_netdev_stat_ops;
netdev->watchdog_timeo = 2 * HZ;
enic_set_ethtool_ops(netdev);
diff --git a/drivers/net/ethernet/davicom/dm9051.c b/drivers/net/ethernet/davicom/dm9051.c
index bcfe52c11804..59ea48d4c9de 100644
--- a/drivers/net/ethernet/davicom/dm9051.c
+++ b/drivers/net/ethernet/davicom/dm9051.c
@@ -1235,6 +1235,7 @@ static const struct of_device_id dm9051_match_table[] = {
{ .compatible = "davicom,dm9051" },
{}
};
+MODULE_DEVICE_TABLE(of, dm9051_match_table);
static const struct spi_device_id dm9051_id_table[] = {
{ "dm9051", 0 },
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index cd3dc4b89518..0a161a4db242 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -49,7 +49,7 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index d5657ff15e3c..71ff9e6db209 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -13,7 +13,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include "tulip.h"
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h
index bd786dfbc066..5e010e1fa6f7 100644
--- a/drivers/net/ethernet/dec/tulip/tulip.h
+++ b/drivers/net/ethernet/dec/tulip/tulip.h
@@ -23,7 +23,7 @@
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index ecfad43df45a..27e01d780cd0 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -23,7 +23,7 @@
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/crc32.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/uaccess.h>
#ifdef CONFIG_SPARC
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 7bfeae04b52b..d0ea92607870 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1842,7 +1842,7 @@ static int rio_resume(struct device *device)
return 0;
}
-static SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume);
#define RIO_PM_OPS (&rio_pm_ops)
#else
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 61fe9625bed1..e48b861e4ce1 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -966,9 +966,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
void be_link_status_update(struct be_adapter *adapter, u8 link_status);
void be_parse_stats(struct be_adapter *adapter);
int be_load_fw(struct be_adapter *adapter, u8 *func);
-bool be_is_wol_supported(struct be_adapter *adapter);
bool be_pause_supported(struct be_adapter *adapter);
-u32 be_get_fw_log_level(struct be_adapter *adapter);
int be_update_queues(struct be_adapter *adapter);
int be_poll(struct napi_struct *napi, int budget);
void be_eqd_update(struct be_adapter *adapter, bool force_update);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index e2085c68c0ee..d70818f06be7 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2381,7 +2381,6 @@ struct be_cmd_req_manage_iface_filters {
} __packed;
u16 be_POST_stage_get(struct be_adapter *adapter);
-int be_pci_fnum_get(struct be_adapter *adapter);
int be_fw_wait_ready(struct be_adapter *adapter);
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
bool permanent, u32 if_handle, u32 pmac_id);
@@ -2406,7 +2405,6 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q);
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
u8 *link_status, u32 dom);
-int be_cmd_reset(struct be_adapter *adapter);
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd);
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
struct be_dma_mem *nonemb_cmd);
@@ -2488,7 +2486,6 @@ int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
int lancer_initiate_dump(struct be_adapter *adapter);
int lancer_delete_dump(struct be_adapter *adapter);
bool dump_present(struct be_adapter *adapter);
-int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
int be_cmd_query_port_name(struct be_adapter *adapter);
int be_cmd_get_func_config(struct be_adapter *adapter,
struct be_resources *res);
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
index 9aa286ba1f00..228a638eae16 100644
--- a/drivers/net/ethernet/engleder/tsnep_ethtool.c
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -310,16 +310,12 @@ static int tsnep_ethtool_get_ts_info(struct net_device *netdev,
struct tsnep_adapter *adapter = netdev_priv(netdev);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index fddfd1dd5070..0b61f548fd18 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -24,6 +24,7 @@
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/of_net.h>
+#include <linux/phy_fixed.h>
#include <net/ip.h>
#include <net/ncsi.h>
@@ -50,6 +51,15 @@
#define FTGMAC_100MHZ 100000000
#define FTGMAC_25MHZ 25000000
+/* For NC-SI to register a fixed-link phy device */
+static struct fixed_phy_status ncsi_phy_status = {
+ .link = 1,
+ .speed = SPEED_100,
+ .duplex = DUPLEX_FULL,
+ .pause = 0,
+ .asym_pause = 0
+};
+
struct ftgmac100 {
/* Registers */
struct resource *res;
@@ -572,7 +582,7 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
(*processed)++;
return true;
- drop:
+drop:
/* Clean rxdes0 (which resets own bit) */
rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
@@ -656,6 +666,11 @@ static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
+ /* Ensure the descriptor config is visible before setting the tx
+ * pointer.
+ */
+ smp_wmb();
+
priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
return true;
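The added smp_wmb() orders the descriptor writes against the subsequent pointer update, so a CPU that observes the new pointer is guaranteed to also observe the descriptor contents; the read side is assumed to order its accesses correspondingly (smp_rmb() or an equivalent dependency). The generic producer/consumer shape:

/* producer */
desc->ctl = cpu_to_le32(ctl_stat);
smp_wmb();			/* publish contents before the index */
priv->tx_clean_pointer = next;

/* consumer (assumed counterpart) */
pointer = priv->tx_pointer;
smp_rmb();			/* read the index before the contents */
ctl = le32_to_cpu(desc->ctl);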
@@ -809,6 +824,11 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
dma_wmb();
first->txdes0 = cpu_to_le32(f_ctl_stat);
+ /* Ensure the descriptor config is visible before setting the tx
+ * pointer.
+ */
+ smp_wmb();
+
/* Update next TX pointer */
priv->tx_pointer = pointer;
@@ -829,7 +849,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
- dma_err:
+dma_err:
if (net_ratelimit())
netdev_err(netdev, "map tx fragment failed\n");
@@ -851,7 +871,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
* last fragment, so we know ftgmac100_free_tx_packet()
* hasn't freed the skb yet.
*/
- drop:
+drop:
/* Drop the packet */
dev_kfree_skb_any(skb);
netdev->stats.tx_dropped++;
@@ -1344,7 +1364,7 @@ static void ftgmac100_reset(struct ftgmac100 *priv)
ftgmac100_init_all(priv, true);
netdev_dbg(netdev, "Reset done !\n");
- bail:
+bail:
if (priv->mii_bus)
mutex_unlock(&priv->mii_bus->mdio_lock);
if (netdev->phydev)
@@ -1531,7 +1551,8 @@ static int ftgmac100_open(struct net_device *netdev)
if (netdev->phydev) {
/* If we have a PHY, start polling */
phy_start(netdev->phydev);
- } else if (priv->use_ncsi) {
+ }
+ if (priv->use_ncsi) {
/* If using NC-SI, set our carrier on and start the stack */
netif_carrier_on(netdev);
@@ -1543,15 +1564,16 @@ static int ftgmac100_open(struct net_device *netdev)
return 0;
- err_ncsi:
+err_ncsi:
+ phy_stop(netdev->phydev);
napi_disable(&priv->napi);
netif_stop_queue(netdev);
- err_alloc:
+err_alloc:
ftgmac100_free_buffers(priv);
free_irq(netdev->irq, netdev);
- err_irq:
+err_irq:
netif_napi_del(&priv->napi);
- err_hw:
+err_hw:
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
ftgmac100_free_rings(priv);
return err;
@@ -1577,7 +1599,7 @@ static int ftgmac100_stop(struct net_device *netdev)
netif_napi_del(&priv->napi);
if (netdev->phydev)
phy_stop(netdev->phydev);
- else if (priv->use_ncsi)
+ if (priv->use_ncsi)
ncsi_stop_dev(priv->ndev);
ftgmac100_stop_hw(priv);
@@ -1715,6 +1737,9 @@ static void ftgmac100_phy_disconnect(struct net_device *netdev)
phy_disconnect(netdev->phydev);
if (of_phy_is_fixed_link(priv->dev->of_node))
of_phy_deregister_fixed_link(priv->dev->of_node);
+
+ if (priv->use_ncsi)
+ fixed_phy_unregister(netdev->phydev);
}
static void ftgmac100_destroy_mdio(struct net_device *netdev)
@@ -1792,6 +1817,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
struct resource *res;
int irq;
struct net_device *netdev;
+ struct phy_device *phydev;
struct ftgmac100 *priv;
struct device_node *np;
int err = 0;
@@ -1879,6 +1905,19 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = -EINVAL;
goto err_phy_connect;
}
+
+ phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, np);
+ if (IS_ERR(phydev)) {
+ dev_err(&pdev->dev, "failed to register fixed PHY device\n");
+ err = PTR_ERR(phydev);
+ goto err_phy_connect;
+ }
+ err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (err) {
+ dev_err(&pdev->dev, "Connecting PHY failed\n");
+ goto err_phy_connect;
+ }
} else if (np && of_phy_is_fixed_link(np)) {
struct phy_device *phy;
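For NC-SI there is no MDIO-visible PHY, so the probe path now registers a software fixed PHY that permanently reports 100/Full link-up and connects it like a real one; that gives the phylib state machine something to drive, and is why ftgmac100_phy_disconnect() gains the matching fixed_phy_unregister(). Condensed:

phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, np);
if (IS_ERR(phydev))
	return PTR_ERR(phydev);
err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link,
			 PHY_INTERFACE_MODE_MII);
/* ...and on teardown: */
phy_disconnect(phydev);
fixed_phy_unregister(phydev);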
diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
index 63b3e02fab16..4968f6f0bdbc 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.h
+++ b/drivers/net/ethernet/faraday/ftgmac100.h
@@ -84,7 +84,7 @@
FTGMAC100_INT_RPKT_BUF)
/* All the interrupts we care about */
-#define FTGMAC100_INT_ALL (FTGMAC100_INT_RPKT_BUF | \
+#define FTGMAC100_INT_ALL (FTGMAC100_INT_RXTX | \
FTGMAC100_INT_BAD)
/*
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index cfe6b57b1da0..e15dd3d858df 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -229,7 +229,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->max_mtu = dpaa_get_max_mtu();
net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_LLTX | NETIF_F_RXHASH);
+ NETIF_F_RXHASH);
net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
/* The kernel enables GSO automatically, if we declare NETIF_F_SG.
@@ -239,6 +239,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->features |= NETIF_F_RXCSUM;
net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ net_dev->lltx = true;
/* we do not want shared skbs on TX */
net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
@@ -2272,12 +2273,12 @@ static netdev_tx_t
dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
const int queue_mapping = skb_get_queue_mapping(skb);
- bool nonlinear = skb_is_nonlinear(skb);
struct rtnl_link_stats64 *percpu_stats;
struct dpaa_percpu_priv *percpu_priv;
struct netdev_queue *txq;
struct dpaa_priv *priv;
struct qm_fd fd;
+ bool nonlinear;
int offset = 0;
int err = 0;
@@ -2287,6 +2288,13 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
qm_fd_clear_fd(&fd);
+ /* Packet data is always read as 32-bit words, so zero out any part of
+ * the skb which might be sent if we have to pad the packet
+ */
+ if (__skb_put_padto(skb, ETH_ZLEN, false))
+ goto enomem;
+
+ nonlinear = skb_is_nonlinear(skb);
if (!nonlinear) {
/* We're going to store the skb backpointer at the beginning
* of the data buffer, so we need a privately owned skb
@@ -3156,8 +3164,9 @@ static void dpaa_napi_del(struct net_device *net_dev)
for_each_possible_cpu(cpu) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- netif_napi_del(&percpu_priv->np.napi);
+ __netif_napi_del(&percpu_priv->np.napi);
}
+ synchronize_net();
}
static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
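The __skb_put_padto() call above deserves a note: the hardware reads packet data as 32-bit words, so frames shorter than ETH_ZLEN must be padded with zeroed, not stale, bytes before DMA. A minimal sketch of the idiom, with a hypothetical wrapper name:

	/* Pad a short frame to the 60-byte Ethernet minimum before DMA.
	 * The tail added by __skb_put_padto() is zero-filled, so stale
	 * memory cannot leak onto the wire. The third argument (false)
	 * tells the helper not to free the skb on failure - the caller's
	 * error path still owns it.
	 */
	static int dpaa_pad_for_dma(struct sk_buff *skb)
	{
		if (__skb_put_padto(skb, ETH_ZLEN, false))
			return -ENOMEM;
		return 0;
	}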
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 6866807973da..29886a8ba73f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4594,12 +4594,13 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
net_dev->priv_flags |= supported;
net_dev->priv_flags &= ~not_supported;
+ net_dev->lltx = true;
/* Features */
net_dev->features = NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_SG | NETIF_F_HIGHDMA |
- NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
+ NETIF_F_HW_TC | NETIF_F_TSO;
net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
net_dev->hw_features = net_dev->features;
net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
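Both DPAA drivers in this series drop NETIF_F_LLTX in favour of the new net_device::lltx member; lockless TX becomes a driver attribute rather than a user-visible feature flag. The conversion is mechanical:

	/* Before: advertised as a (fake) offload feature */
	net_dev->features |= NETIF_F_LLTX;

	/* After: a private netdev field, invisible to ethtool */
	net_dev->lltx = true;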
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index a71f848adc05..a293b08f36d4 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -2638,13 +2638,14 @@ static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
{
- int *count, i;
+ int *count, ret, i;
for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
+ ret = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
count = &ethsw->buf_count;
- *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
+ *count += ret;
- if (unlikely(*count < BUFS_PER_CMD))
+ if (unlikely(ret < BUFS_PER_CMD))
return -ENOMEM;
}
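The dpaa2_switch_seed_bp() change fixes a real bug: the shortfall test was applied to the running total, which exceeds BUFS_PER_CMD after the first successful iteration, so later partial refills went unnoticed. Testing the per-call return value catches them:

	for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
		ret = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
		*count += ret;

		/* Must test this call's result; *count >= BUFS_PER_CMD is
		 * always true once the first batch has succeeded.
		 */
		if (unlikely(ret < BUFS_PER_CMD))
			return -ENOMEM;
	}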
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 5c45f42232d3..c09370eab319 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -902,6 +902,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
__netif_subqueue_stopped(ndev, tx_ring->index) &&
+ !test_bit(ENETC_TX_DOWN, &priv->flags) &&
(enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
netif_wake_subqueue(ndev, tx_ring->index);
}
@@ -977,7 +978,6 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
return j;
}
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static void enetc_get_rx_tstamp(struct net_device *ndev,
union enetc_rx_bd *rxbd,
struct sk_buff *skb)
@@ -1001,7 +1001,6 @@ static void enetc_get_rx_tstamp(struct net_device *ndev,
shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
}
}
-#endif
static void enetc_get_offloads(struct enetc_bdr *rx_ring,
union enetc_rx_bd *rxbd, struct sk_buff *skb)
@@ -1041,10 +1040,9 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
__vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
}
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
- if (priv->active_offloads & ENETC_F_RX_TSTAMP)
+ if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) &&
+ (priv->active_offloads & ENETC_F_RX_TSTAMP))
enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
-#endif
}
/* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
@@ -1380,6 +1378,9 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
int xdp_tx_bd_cnt, i, k;
int xdp_tx_frm_cnt = 0;
+ if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags)))
+ return -ENETDOWN;
+
enetc_lock_mdio();
tx_ring = priv->xdp_tx_ring[smp_processor_id()];
@@ -1524,7 +1525,6 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
&rx_ring->rx_swbd[rx_ring_first]);
enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
}
- rx_ring->stats.xdp_drops++;
}
static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
@@ -1589,6 +1589,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
fallthrough;
case XDP_DROP:
enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_drops++;
break;
case XDP_PASS:
rxbd = orig_rxbd;
@@ -1605,6 +1606,12 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
break;
case XDP_TX:
tx_ring = priv->xdp_tx_ring[rx_ring->index];
+ if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ tx_ring->stats.xdp_tx_drops++;
+ break;
+ }
+
xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
rx_ring,
orig_i, i);
@@ -2226,18 +2233,24 @@ static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
}
-static void enetc_enable_bdrs(struct enetc_ndev_priv *priv)
+static void enetc_enable_rx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;
- for (i = 0; i < priv->num_tx_rings; i++)
- enetc_enable_txbdr(hw, priv->tx_ring[i]);
-
for (i = 0; i < priv->num_rx_rings; i++)
enetc_enable_rxbdr(hw, priv->rx_ring[i]);
}
+static void enetc_enable_tx_bdrs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_enable_txbdr(hw, priv->tx_ring[i]);
+}
+
static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
int idx = rx_ring->index;
@@ -2254,18 +2267,24 @@ static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
}
-static void enetc_disable_bdrs(struct enetc_ndev_priv *priv)
+static void enetc_disable_rx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;
- for (i = 0; i < priv->num_tx_rings; i++)
- enetc_disable_txbdr(hw, priv->tx_ring[i]);
-
for (i = 0; i < priv->num_rx_rings; i++)
enetc_disable_rxbdr(hw, priv->rx_ring[i]);
}
+static void enetc_disable_tx_bdrs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_disable_txbdr(hw, priv->tx_ring[i]);
+}
+
static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
int delay = 8, timeout = 100;
@@ -2305,12 +2324,11 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
priv->ndev->name, i);
- err = request_irq(irq, enetc_msix, 0, v->name, v);
+ err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v);
if (err) {
dev_err(priv->dev, "request_irq() failed!\n");
goto irq_err;
}
- disable_irq(irq);
v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
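The request_irq() change above is a common cleanup: IRQF_NO_AUTOEN registers the handler with the line left masked, removing the window between request_irq() and the old disable_irq() call in which an early interrupt could run against a half-initialized vector. The pattern, in isolation:

	/* Register with the IRQ initially masked ... */
	err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v);
	if (err)
		goto irq_err;

	/* ... and unmask it only once rings and NAPI are ready
	 * (enetc_start() calls enable_irq() per vector, as shown below).
	 */
	enable_irq(irq);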
@@ -2464,9 +2482,13 @@ void enetc_start(struct net_device *ndev)
enable_irq(irq);
}
- enetc_enable_bdrs(priv);
+ enetc_enable_tx_bdrs(priv);
+
+ enetc_enable_rx_bdrs(priv);
netif_tx_start_all_queues(ndev);
+
+ clear_bit(ENETC_TX_DOWN, &priv->flags);
}
EXPORT_SYMBOL_GPL(enetc_start);
@@ -2524,9 +2546,15 @@ void enetc_stop(struct net_device *ndev)
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int i;
+ set_bit(ENETC_TX_DOWN, &priv->flags);
+
netif_tx_stop_all_queues(ndev);
- enetc_disable_bdrs(priv);
+ enetc_disable_rx_bdrs(priv);
+
+ enetc_wait_bdrs(priv);
+
+ enetc_disable_tx_bdrs(priv);
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(priv->si->pdev,
@@ -2537,8 +2565,6 @@ void enetc_stop(struct net_device *ndev)
napi_disable(&priv->int_vector[i]->napi);
}
- enetc_wait_bdrs(priv);
-
enetc_clear_interrupts(priv);
}
EXPORT_SYMBOL_GPL(enetc_stop);
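Taken together, the enetc_start()/enetc_stop() hunks split the combined BDR helpers so the two directions can be sequenced around the new ENETC_TX_DOWN bit. The teardown ordering is the interesting part, condensed here: new transmitters are turned away first, RX stops producing XDP_TX work, pending TX drains, and only then are the TX rings disabled.

	set_bit(ENETC_TX_DOWN, &priv->flags);	/* xmit/XDP paths now bail out */
	netif_tx_stop_all_queues(ndev);

	enetc_disable_rx_bdrs(priv);	/* no new XDP_TX producers */
	enetc_wait_bdrs(priv);		/* let queued TX descriptors drain */
	enetc_disable_tx_bdrs(priv);	/* safe: rings are idle */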
@@ -2882,7 +2908,6 @@ void enetc_set_features(struct net_device *ndev, netdev_features_t features)
}
EXPORT_SYMBOL_GPL(enetc_set_features);
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -2951,17 +2976,17 @@ static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
-#endif
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
- if (cmd == SIOCSHWTSTAMP)
- return enetc_hwtstamp_set(ndev, rq);
- if (cmd == SIOCGHWTSTAMP)
- return enetc_hwtstamp_get(ndev, rq);
-#endif
+
+ if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) {
+ if (cmd == SIOCSHWTSTAMP)
+ return enetc_hwtstamp_set(ndev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return enetc_hwtstamp_get(ndev, rq);
+ }
if (!priv->phylink)
return -EOPNOTSUPP;
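The #ifdef-to-IS_ENABLED() conversions in this file all follow one pattern, shown side by side below with the ioctl case. IS_ENABLED() keeps the guarded code visible to the compiler in every configuration, so it is type-checked even when the PTP clock is compiled out, while dead-code elimination still removes it from the object file.

	/* Before: never even parsed in !CONFIG_FSL_ENETC_PTP_CLOCK builds */
	#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
		if (cmd == SIOCSHWTSTAMP)
			return enetc_hwtstamp_set(ndev, rq);
	#endif

	/* After: always compiled, folded away when the option is off */
		if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) {
			if (cmd == SIOCSHWTSTAMP)
				return enetc_hwtstamp_set(ndev, rq);
		}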
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index a9c2ff22431c..fb7d98d57783 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -184,10 +184,9 @@ static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
{
int hw_idx = i;
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
- if (rx_ring->ext_en)
+ if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en)
hw_idx = 2 * i;
-#endif
+
return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
}
@@ -199,10 +198,8 @@ static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring,
new_rxbd++;
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
- if (rx_ring->ext_en)
+ if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en)
new_rxbd++;
-#endif
if (unlikely(++new_index == rx_ring->bd_count)) {
new_rxbd = rx_ring->bd_base;
@@ -328,6 +325,7 @@ enum enetc_active_offloads {
enum enetc_flags_bit {
ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0,
+ ENETC_TX_DOWN,
};
/* interrupt coalescing modes */
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 5e684b23c5f5..2563eb8ac7b6 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -849,28 +849,26 @@ static int enetc_get_ts_info(struct net_device *ndev,
if (phc_idx) {
info->phc_index = *phc_idx;
symbol_put(enetc_phc_index);
- } else {
- info->phc_index = -1;
}
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+ if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) {
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
+
+ return 0;
+ }
+
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON) |
(1 << HWTSTAMP_TX_ONESTEP_SYNC);
+
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
-#else
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
-#endif
+
return 0;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 11b14555802c..8f6b0bf48139 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/fsl/enetc_mdio.h>
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index a19cb2a786fd..1cca0425d493 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -691,10 +691,19 @@ struct fec_enet_private {
/* XDP BPF Program */
struct bpf_prog *xdp_prog;
+ struct {
+ int pps_enable;
+ u64 ns_sys, ns_phc;
+ u32 at_corr;
+ u8 at_inc_corr;
+ } ptp_saved_state;
+
u64 ethtool_stats[];
};
void fec_ptp_init(struct platform_device *pdev, int irq_idx);
+void fec_ptp_restore_state(struct fec_enet_private *fep);
+void fec_ptp_save_state(struct fec_enet_private *fep);
void fec_ptp_stop(struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
int fec_ptp_set(struct net_device *ndev, struct kernel_hwtstamp_config *config,
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a923cb95cdc6..9d9fcec41488 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1077,6 +1077,9 @@ fec_restart(struct net_device *ndev)
u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = FEC_ECR_ETHEREN;
+ if (fep->bufdesc_ex)
+ fec_ptp_save_state(fep);
+
/* Whack a reset. We should wait for this.
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
* instead of reset MAC itself.
@@ -1244,8 +1247,10 @@ fec_restart(struct net_device *ndev)
writel(ecntl, fep->hwp + FEC_ECNTRL);
fec_enet_active_rxring(ndev);
- if (fep->bufdesc_ex)
+ if (fep->bufdesc_ex) {
fec_ptp_start_cyclecounter(ndev);
+ fec_ptp_restore_state(fep);
+ }
/* Enable interrupts we wish to service */
if (fep->link)
@@ -1336,6 +1341,9 @@ fec_stop(struct net_device *ndev)
netdev_err(ndev, "Graceful transmit stop did not complete!\n");
}
+ if (fep->bufdesc_ex)
+ fec_ptp_save_state(fep);
+
/* Whack a reset. We should wait for this.
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
* instead of reset MAC itself.
@@ -1366,6 +1374,9 @@ fec_stop(struct net_device *ndev)
val = readl(fep->hwp + FEC_ECNTRL);
val |= FEC_ECR_EN1588;
writel(val, fep->hwp + FEC_ECNTRL);
+
+ fec_ptp_start_cyclecounter(ndev);
+ fec_ptp_restore_state(fep);
}
}
@@ -2775,15 +2786,11 @@ static int fec_enet_get_ts_info(struct net_device *ndev,
if (fep->bufdesc_ex) {
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (fep->ptp_clock)
info->phc_index = ptp_clock_index(fep->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -4606,7 +4613,7 @@ fec_drv_remove(struct platform_device *pdev)
free_netdev(ndev);
}
-static int __maybe_unused fec_suspend(struct device *dev)
+static int fec_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4659,7 +4666,7 @@ static int __maybe_unused fec_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused fec_resume(struct device *dev)
+static int fec_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4714,7 +4721,7 @@ failed_clk:
return ret;
}
-static int __maybe_unused fec_runtime_suspend(struct device *dev)
+static int fec_runtime_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4725,7 +4732,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused fec_runtime_resume(struct device *dev)
+static int fec_runtime_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4746,14 +4753,14 @@ failed_clk_ipg:
}
static const struct dev_pm_ops fec_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
- SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
+ RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};
static struct platform_driver fec_driver = {
.driver = {
.name = DRIVER_NAME,
- .pm = &fec_pm_ops,
+ .pm = pm_ptr(&fec_pm_ops),
.of_match_table = fec_dt_ids,
.suppress_bind_attrs = true,
},
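The fec PM conversion pairs two idioms: SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() always reference the callbacks, so __maybe_unused is no longer needed to silence unused-function warnings, and pm_ptr() drops the whole ops table when CONFIG_PM is off. Condensed:

	static const struct dev_pm_ops fec_pm_ops = {
		SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
		RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
	};

	static struct platform_driver fec_driver = {
		.driver = {
			/* Evaluates to NULL when CONFIG_PM=n, letting the
			 * linker discard the table and its callbacks.
			 */
			.pm = pm_ptr(&fec_pm_ops),
		},
	};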
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 2e4f3e1782a2..a4eb6edb850a 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -91,6 +91,30 @@
#define FEC_PTP_MAX_NSEC_COUNTER 0x80000000ULL
/**
+ * fec_ptp_read - read raw cycle counter (to be used by time counter)
+ * @cc: the cyclecounter structure
+ *
+ * this function reads the cyclecounter registers and is called by the
+ * cyclecounter structure used to construct a ns counter from the
+ * arbitrary fixed point registers
+ */
+static u64 fec_ptp_read(const struct cyclecounter *cc)
+{
+ struct fec_enet_private *fep =
+ container_of(cc, struct fec_enet_private, cc);
+ u32 tempval;
+
+ tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+ tempval |= FEC_T_CTRL_CAPTURE;
+ writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+
+ if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
+ udelay(1);
+
+ return readl(fep->hwp + FEC_ATIME);
+}
+
+/**
* fec_ptp_enable_pps
* @fep: the fec_enet_private structure handle
* @enable: enable the channel pps output
@@ -136,7 +160,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
* to current timer would be next second.
*/
- tempval = fep->cc.read(&fep->cc);
+ tempval = fec_ptp_read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
ts = ns_to_timespec64(ns);
@@ -211,13 +235,7 @@ static int fec_ptp_pps_perout(struct fec_enet_private *fep)
timecounter_read(&fep->tc);
/* Get the current ptp hardware time counter */
- temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
- temp_val |= FEC_T_CTRL_CAPTURE;
- writel(temp_val, fep->hwp + FEC_ATIME_CTRL);
- if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
- udelay(1);
-
- ptp_hc = readl(fep->hwp + FEC_ATIME);
+ ptp_hc = fec_ptp_read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
curr_time = timecounter_cyc2time(&fep->tc, ptp_hc);
@@ -272,30 +290,6 @@ static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer)
}
/**
- * fec_ptp_read - read raw cycle counter (to be used by time counter)
- * @cc: the cyclecounter structure
- *
- * this function reads the cyclecounter registers and is called by the
- * cyclecounter structure used to construct a ns counter from the
- * arbitrary fixed point registers
- */
-static u64 fec_ptp_read(const struct cyclecounter *cc)
-{
- struct fec_enet_private *fep =
- container_of(cc, struct fec_enet_private, cc);
- u32 tempval;
-
- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
- tempval |= FEC_T_CTRL_CAPTURE;
- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
-
- if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
- udelay(1);
-
- return readl(fep->hwp + FEC_ATIME);
-}
-
-/**
* fec_ptp_start_cyclecounter - create the cycle counter from hw
* @ndev: network device
*
@@ -770,6 +764,56 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
schedule_delayed_work(&fep->time_keep, HZ);
}
+void fec_ptp_save_state(struct fec_enet_private *fep)
+{
+ unsigned long flags;
+ u32 atime_inc_corr;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ fep->ptp_saved_state.pps_enable = fep->pps_enable;
+
+ fep->ptp_saved_state.ns_phc = timecounter_read(&fep->tc);
+ fep->ptp_saved_state.ns_sys = ktime_get_ns();
+
+ fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR);
+ atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK;
+ fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET);
+
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+}
+
+/* Restore PTP functionality after a reset */
+void fec_ptp_restore_state(struct fec_enet_private *fep)
+{
+ u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
+ unsigned long flags;
+ u32 counter;
+ u64 ns;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ /* Reset turned it off, so adjust our status flag */
+ fep->pps_enable = 0;
+
+ writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR);
+ atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET;
+ writel(atime_inc, fep->hwp + FEC_ATIME_INC);
+
+ ns = ktime_get_ns() - fep->ptp_saved_state.ns_sys + fep->ptp_saved_state.ns_phc;
+ counter = ns & fep->cc.mask;
+ writel(counter, fep->hwp + FEC_ATIME);
+ timecounter_init(&fep->tc, &fep->cc, ns);
+
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ /* Restart PPS if needed */
+ if (fep->ptp_saved_state.pps_enable) {
+ /* Re-enable PPS */
+ fec_ptp_enable_pps(fep, 1);
+ }
+}
+
void fec_ptp_stop(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
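The save/restore pair added here keeps the PHC sane across the MAC resets issued by fec_restart() and fec_stop(). The key arithmetic is in fec_ptp_restore_state(): the saved PHC timestamp is advanced by however much system time elapsed while the block sat in reset, then the hardware counter and the software timecounter are re-seeded together. In isolation:

	/* ns_phc and ns_sys were captured together in fec_ptp_save_state() */
	u64 ns = ktime_get_ns() - fep->ptp_saved_state.ns_sys +
		 fep->ptp_saved_state.ns_phc;

	writel(ns & fep->cc.mask, fep->hwp + FEC_ATIME);	/* hw counter */
	timecounter_init(&fep->tc, &fep->cc, ns);		/* sw epoch   */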
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 406e75e9e5ea..f17a4e511510 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1748,7 +1748,7 @@ static int fman_port_probe(struct platform_device *of_dev)
struct resource res;
struct resource *dev_res;
u32 val;
- int err = 0, lenp;
+ int err = 0;
enum fman_port_type port_type;
u16 port_speed;
u8 port_id;
@@ -1795,7 +1795,7 @@ static int fman_port_probe(struct platform_device *of_dev)
if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
port_type = FMAN_PORT_TYPE_TX;
port_speed = 1000;
- if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
+ if (of_property_read_bool(port_node, "fsl,fman-10g-port"))
port_speed = 10000;
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
@@ -1808,7 +1808,7 @@ static int fman_port_probe(struct platform_device *of_dev)
} else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
port_type = FMAN_PORT_TYPE_RX;
port_speed = 1000;
- if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
+ if (of_property_read_bool(port_node, "fsl,fman-10g-port"))
port_speed = 10000;
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
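For boolean devicetree properties only presence matters, so of_property_read_bool() states the intent directly and removes the write-only lenp out-parameter that of_find_property() required (hence the dropped local in the hunk above):

	/* Before */
	int lenp;
	if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
		port_speed = 10000;

	/* After */
	if (of_property_read_bool(port_node, "fsl,fman-10g-port"))
		port_speed = 10000;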
diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig
index 7f20840fde07..57013bf14d7c 100644
--- a/drivers/net/ethernet/freescale/fs_enet/Kconfig
+++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig
@@ -3,7 +3,7 @@ config FS_ENET
tristate "Freescale Ethernet Driver"
depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x)
select MII
- select PHYLIB
+ select PHYLINK
config FS_ENET_MPC5121_FEC
def_bool y if (FS_ENET && PPC_MPC512x)
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index cf392faa6105..3425c4a6abcb 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
*
@@ -9,10 +10,6 @@
*
* Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
* and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -29,17 +26,18 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/property.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pgtable.h>
+#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <asm/irq.h>
@@ -72,6 +70,13 @@ static void fs_set_multicast_list(struct net_device *dev)
(*fep->ops->set_multicast_list)(dev);
}
+static int fs_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ return phylink_mii_ioctl(fep->phylink, ifr, cmd);
+}
+
static void skb_align(struct sk_buff *skb, int align)
{
int off = ((unsigned long)skb->data) & (align - 1);
@@ -84,15 +89,14 @@ static void skb_align(struct sk_buff *skb, int align)
static int fs_enet_napi(struct napi_struct *napi, int budget)
{
struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
- struct net_device *dev = fep->ndev;
const struct fs_platform_info *fpi = fep->fpi;
- cbd_t __iomem *bdp;
+ struct net_device *dev = fep->ndev;
+ int curidx, dirtyidx, received = 0;
+ int do_wake = 0, do_restart = 0;
+ int tx_left = TX_RING_SIZE;
struct sk_buff *skb, *skbn;
- int received = 0;
+ cbd_t __iomem *bdp;
u16 pkt_len, sc;
- int curidx;
- int dirtyidx, do_wake, do_restart;
- int tx_left = TX_RING_SIZE;
spin_lock(&fep->tx_lock);
bdp = fep->dirty_tx;
@@ -100,7 +104,6 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
/* clear status bits for napi*/
(*fep->ops->napi_clear_event)(dev);
- do_wake = do_restart = 0;
while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0 && tx_left) {
dirtyidx = bdp - fep->tx_bd_base;
@@ -109,12 +112,9 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
skb = fep->tx_skbuff[dirtyidx];
- /*
- * Check for errors.
- */
+ /* Check for errors. */
if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
-
if (sc & BD_ENET_TX_HB) /* No heartbeat */
dev->stats.tx_heartbeat_errors++;
if (sc & BD_ENET_TX_LC) /* Late collision */
@@ -130,16 +130,16 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
dev->stats.tx_errors++;
do_restart = 1;
}
- } else
+ } else {
dev->stats.tx_packets++;
+ }
if (sc & BD_ENET_TX_READY) {
dev_warn(fep->dev,
"HEY! Enet xmit interrupt and TX_READY.\n");
}
- /*
- * Deferred means some collisions occurred during transmit,
+ /* Deferred means some collisions occurred during transmit,
* but we eventually sent the packet OK.
*/
if (sc & BD_ENET_TX_DEF)
@@ -153,25 +153,20 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
CBDR_DATLEN(bdp), DMA_TO_DEVICE);
- /*
- * Free the sk buffer associated with this last transmit.
- */
+ /* Free the sk buffer associated with this last transmit. */
if (skb) {
dev_kfree_skb(skb);
fep->tx_skbuff[dirtyidx] = NULL;
}
- /*
- * Update pointer to next buffer descriptor to be transmitted.
+ /* Update pointer to next buffer descriptor to be transmitted.
*/
if ((sc & BD_ENET_TX_WRAP) == 0)
bdp++;
else
bdp = fep->tx_bd_base;
- /*
- * Since we have freed up a buffer, the ring is no longer
- * full.
+ /* Since we have freed up a buffer, the ring is no longer full.
*/
if (++fep->tx_free == MAX_SKB_FRAGS)
do_wake = 1;
@@ -188,8 +183,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
if (do_wake)
netif_wake_queue(dev);
- /*
- * First, grab all of the stats for the incoming packet.
+ /* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
*/
bdp = fep->cur_rx;
@@ -198,16 +192,13 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
received < budget) {
curidx = bdp - fep->rx_bd_base;
- /*
- * Since we have allocated space to hold a complete frame,
+ /* Since we have allocated space to hold a complete frame,
* the last indicator should be set.
*/
if ((sc & BD_ENET_RX_LAST) == 0)
dev_warn(fep->dev, "rcv is not +last\n");
- /*
- * Check for errors.
- */
+ /* Check for errors. */
if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
dev->stats.rx_errors++;
@@ -228,9 +219,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
} else {
skb = fep->rx_skbuff[curidx];
- /*
- * Process the incoming frame.
- */
+ /* Process the incoming frame */
dev->stats.rx_packets++;
pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
dev->stats.rx_bytes += pkt_len + 4;
@@ -238,15 +227,15 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
if (pkt_len <= fpi->rx_copybreak) {
/* +2 to make IP header L1 cache aligned */
skbn = netdev_alloc_skb(dev, pkt_len + 2);
- if (skbn != NULL) {
+ if (skbn) {
skb_reserve(skbn, 2); /* align IP header */
- skb_copy_from_linear_data(skb,
- skbn->data, pkt_len);
+ skb_copy_from_linear_data(skb, skbn->data,
+ pkt_len);
swap(skb, skbn);
dma_sync_single_for_cpu(fep->dev,
- CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(pkt_len),
- DMA_FROM_DEVICE);
+ CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(pkt_len),
+ DMA_FROM_DEVICE);
}
} else {
skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
@@ -256,20 +245,18 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
skb_align(skbn, ENET_RX_ALIGN);
- dma_unmap_single(fep->dev,
- CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
- dma = dma_map_single(fep->dev,
- skbn->data,
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
+ dma = dma_map_single(fep->dev, skbn->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
CBDW_BUFADDR(bdp, dma);
}
}
- if (skbn != NULL) {
+ if (skbn) {
skb_put(skb, pkt_len); /* Make room */
skb->protocol = eth_type_trans(skb, dev);
received++;
@@ -284,9 +271,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
CBDW_DATLEN(bdp, 0);
CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
- /*
- * Update BD pointer to next entry.
- */
+ /* Update BD pointer to next entry */
if ((sc & BD_ENET_RX_WRAP) == 0)
bdp++;
else
@@ -308,19 +293,16 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
return budget;
}
-/*
- * The interrupt handler.
+/* The interrupt handler.
* This is called from the MPC core interrupt.
*/
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
+ u32 int_events, int_clr_events;
struct fs_enet_private *fep;
- u32 int_events;
- u32 int_clr_events;
- int nr, napi_ok;
- int handled;
+ int nr, napi_ok, handled;
fep = netdev_priv(dev);
@@ -342,12 +324,12 @@ fs_enet_interrupt(int irq, void *dev_id)
(*fep->ops->napi_disable)(dev);
(*fep->ops->clear_int_events)(dev, fep->ev_napi);
- /* NOTE: it is possible for FCCs in NAPI mode */
- /* to submit a spurious interrupt while in poll */
+ /* NOTE: it is possible for FCCs in NAPI mode
+ * to submit a spurious interrupt while in poll
+ */
if (napi_ok)
__napi_schedule(&fep->napi);
}
-
}
handled = nr > 0;
@@ -357,45 +339,40 @@ fs_enet_interrupt(int irq, void *dev_id)
void fs_init_bds(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- cbd_t __iomem *bdp;
struct sk_buff *skb;
+ cbd_t __iomem *bdp;
int i;
fs_cleanup_bds(dev);
- fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+ fep->dirty_tx = fep->tx_bd_base;
+ fep->cur_tx = fep->tx_bd_base;
fep->tx_free = fep->tx_ring;
fep->cur_rx = fep->rx_bd_base;
- /*
- * Initialize the receive buffer descriptors.
- */
+ /* Initialize the receive buffer descriptors */
for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
- if (skb == NULL)
+ if (!skb)
break;
skb_align(skb, ENET_RX_ALIGN);
fep->rx_skbuff[i] = skb;
- CBDW_BUFADDR(bdp,
- dma_map_single(fep->dev, skb->data,
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE));
+ CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skb->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE));
CBDW_DATLEN(bdp, 0); /* zero */
CBDW_SC(bdp, BD_ENET_RX_EMPTY |
((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
}
- /*
- * if we failed, fillup remainder
- */
+
+	/* if we failed, fill up the remainder */
for (; i < fep->rx_ring; i++, bdp++) {
fep->rx_skbuff[i] = NULL;
CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
}
- /*
- * ...and the same for transmit.
- */
+ /* ...and the same for transmit. */
for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
fep->tx_skbuff[i] = NULL;
CBDW_BUFADDR(bdp, 0);
@@ -411,32 +388,30 @@ void fs_cleanup_bds(struct net_device *dev)
cbd_t __iomem *bdp;
int i;
- /*
- * Reset SKB transmit buffers.
- */
+ /* Reset SKB transmit buffers. */
for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
- if ((skb = fep->tx_skbuff[i]) == NULL)
+ skb = fep->tx_skbuff[i];
+ if (!skb)
continue;
/* unmap */
dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- skb->len, DMA_TO_DEVICE);
+ skb->len, DMA_TO_DEVICE);
fep->tx_skbuff[i] = NULL;
dev_kfree_skb(skb);
}
- /*
- * Reset SKB receive buffers
- */
+ /* Reset SKB receive buffers */
for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
- if ((skb = fep->rx_skbuff[i]) == NULL)
+ skb = fep->rx_skbuff[i];
+ if (!skb)
continue;
/* unmap */
dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
fep->rx_skbuff[i] = NULL;
@@ -444,12 +419,8 @@ void fs_cleanup_bds(struct net_device *dev)
}
}
-/**********************************************************************************/
-
#ifdef CONFIG_FS_ENET_MPC5121_FEC
-/*
- * MPC5121 FEC requeries 4-byte alignment for TX data buffer!
- */
+/* MPC5121 FEC requires 4-byte alignment for TX data buffer! */
static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
struct sk_buff *skb)
{
@@ -481,15 +452,12 @@ static netdev_tx_t
fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
+ int curidx, nr_frags, len;
cbd_t __iomem *bdp;
- int curidx;
- u16 sc;
- int nr_frags;
skb_frag_t *frag;
- int len;
+ u16 sc;
#ifdef CONFIG_FS_ENET_MPC5121_FEC
- int is_aligned = 1;
- int i;
+ int i, is_aligned = 1;
if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
is_aligned = 0;
@@ -507,8 +475,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!is_aligned) {
skb = tx_skb_align_workaround(dev, skb);
if (!skb) {
- /*
- * We have lost packet due to memory allocation error
+			/* We have lost the packet due to a memory allocation error
* in tx_skb_align_workaround(). Hopefully original
* skb is still valid, so try transmit it later.
*/
@@ -519,9 +486,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock(&fep->tx_lock);
- /*
- * Fill in a Tx ring entry
- */
+ /* Fill in a Tx ring entry */
bdp = fep->cur_tx;
nr_frags = skb_shinfo(skb)->nr_frags;
@@ -529,8 +494,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
spin_unlock(&fep->tx_lock);
- /*
+		/* Oops. All transmit buffers are full. Bail out.
+ /* Ooops. All transmit buffers are full. Bail out.
* This should not happen, since the tx queue should be stopped.
*/
dev_warn(fep->dev, "tx queue full!.\n");
@@ -543,12 +507,12 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += len;
if (nr_frags)
len -= skb->data_len;
+
fep->tx_free -= nr_frags + 1;
- /*
- * Push the data cache so the CPM does not get stale memory data.
+ /* Push the data cache so the CPM does not get stale memory data.
*/
CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
- skb->data, len, DMA_TO_DEVICE));
+ skb->data, len, DMA_TO_DEVICE));
CBDW_DATLEN(bdp, len);
fep->mapped_as_page[curidx] = 0;
@@ -585,9 +549,11 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* note that while FEC does not have this bit
* it marks it as available for software use
- * yay for hw reuse :) */
+ * yay for hw reuse :)
+ */
if (skb->len <= 60)
sc |= BD_ENET_TX_PAD;
+
CBDC_SC(bdp, BD_ENET_TX_STATS);
CBDS_SC(bdp, sc);
@@ -599,6 +565,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
bdp++;
else
bdp = fep->tx_bd_base;
+
fep->cur_tx = bdp;
if (fep->tx_free < MAX_SKB_FRAGS)
@@ -623,15 +590,21 @@ static void fs_timeout_work(struct work_struct *work)
dev->stats.tx_errors++;
- spin_lock_irqsave(&fep->lock, flags);
+	/* If a timeout was detected but the netdev is brought down
+	 * shortly afterwards, it no longer makes sense to try to recover
+	 * from it. netif_running() returns false when this runs from the
+	 * .ndo_close() path, and running the recovery code below from
+	 * that context could deadlock on rtnl.
+ */
+ if (!netif_running(dev))
+ return;
- if (dev->flags & IFF_UP) {
- phy_stop(dev->phydev);
- (*fep->ops->stop)(dev);
- (*fep->ops->restart)(dev);
- }
+ rtnl_lock();
+ phylink_stop(fep->phylink);
+ phylink_start(fep->phylink);
+ rtnl_unlock();
- phy_start(dev->phydev);
+ spin_lock_irqsave(&fep->lock, flags);
wake = fep->tx_free >= MAX_SKB_FRAGS &&
!(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
spin_unlock_irqrestore(&fep->lock, flags);
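The reworked fs_timeout_work() is worth dwelling on. The old code stopped and restarted the PHY under the driver's own locking; with phylink the restart must run under rtnl, and the work item can race with .ndo_close(), which now uses cancel_work() rather than cancel_work_sync() (see the fs_enet_close() hunk below). The netif_running() check bails out of recovery when the device is already going down. A sketch of the resulting shape, with a hypothetical helper name:

	static void example_timeout_recover(struct net_device *dev)
	{
		struct fs_enet_private *fep = netdev_priv(dev);

		if (!netif_running(dev))	/* going down; don't recover */
			return;

		rtnl_lock();		/* phylink_start/stop require rtnl */
		phylink_stop(fep->phylink);
		phylink_start(fep->phylink);
		rtnl_unlock();
	}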
@@ -647,82 +620,37 @@ static void fs_timeout(struct net_device *dev, unsigned int txqueue)
schedule_work(&fep->timeout_work);
}
-/*-----------------------------------------------------------------------------
- * generic link-change handler - should be sufficient for most cases
- *-----------------------------------------------------------------------------*/
-static void generic_adjust_link(struct net_device *dev)
+static void fs_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- int new_state = 0;
-
- if (phydev->link) {
- /* adjust to duplex mode */
- if (phydev->duplex != fep->oldduplex) {
- new_state = 1;
- fep->oldduplex = phydev->duplex;
- }
-
- if (phydev->speed != fep->oldspeed) {
- new_state = 1;
- fep->oldspeed = phydev->speed;
- }
-
- if (!fep->oldlink) {
- new_state = 1;
- fep->oldlink = 1;
- }
-
- if (new_state)
- fep->ops->restart(dev);
- } else if (fep->oldlink) {
- new_state = 1;
- fep->oldlink = 0;
- fep->oldspeed = 0;
- fep->oldduplex = -1;
- }
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct fs_enet_private *fep = netdev_priv(ndev);
+ unsigned long flags;
- if (new_state && netif_msg_link(fep))
- phy_print_status(phydev);
+ spin_lock_irqsave(&fep->lock, flags);
+ fep->ops->restart(ndev, interface, speed, duplex);
+ spin_unlock_irqrestore(&fep->lock, flags);
}
-
-static void fs_adjust_link(struct net_device *dev)
+static void fs_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
{
- struct fs_enet_private *fep = netdev_priv(dev);
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct fs_enet_private *fep = netdev_priv(ndev);
unsigned long flags;
spin_lock_irqsave(&fep->lock, flags);
-
- if(fep->ops->adjust_link)
- fep->ops->adjust_link(dev);
- else
- generic_adjust_link(dev);
-
+ fep->ops->stop(ndev);
spin_unlock_irqrestore(&fep->lock, flags);
}
-static int fs_init_phy(struct net_device *dev)
+static void fs_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct phy_device *phydev;
- phy_interface_t iface;
-
- fep->oldlink = 0;
- fep->oldspeed = 0;
- fep->oldduplex = -1;
-
- iface = fep->fpi->use_rmii ?
- PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII;
-
- phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
- iface);
- if (!phydev) {
- dev_err(&dev->dev, "Could not attach to PHY\n");
- return -ENODEV;
- }
-
- return 0;
+ /* Nothing to do */
}
static int fs_enet_open(struct net_device *dev)
@@ -731,8 +659,9 @@ static int fs_enet_open(struct net_device *dev)
int r;
int err;
- /* to initialize the fep->cur_rx,... */
- /* not doing this, will cause a crash in fs_enet_napi */
+ /* to initialize the fep->cur_rx,...
+	 * not doing this will cause a crash in fs_enet_napi
+ */
fs_init_bds(fep->ndev);
napi_enable(&fep->napi);
@@ -746,13 +675,13 @@ static int fs_enet_open(struct net_device *dev)
return -EINVAL;
}
- err = fs_init_phy(dev);
+ err = phylink_of_phy_connect(fep->phylink, fep->dev->of_node, 0);
if (err) {
free_irq(fep->interrupt, dev);
napi_disable(&fep->napi);
return err;
}
- phy_start(dev->phydev);
+ phylink_start(fep->phylink);
netif_start_queue(dev);
@@ -765,28 +694,25 @@ static int fs_enet_close(struct net_device *dev)
unsigned long flags;
netif_stop_queue(dev);
- netif_carrier_off(dev);
napi_disable(&fep->napi);
- cancel_work_sync(&fep->timeout_work);
- phy_stop(dev->phydev);
+ cancel_work(&fep->timeout_work);
+ phylink_stop(fep->phylink);
spin_lock_irqsave(&fep->lock, flags);
spin_lock(&fep->tx_lock);
(*fep->ops->stop)(dev);
spin_unlock(&fep->tx_lock);
spin_unlock_irqrestore(&fep->lock, flags);
+ phylink_disconnect_phy(fep->phylink);
/* release any irqs */
- phy_disconnect(dev->phydev);
free_irq(fep->interrupt, dev);
return 0;
}
-/*************************************************************************/
-
static void fs_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
+ struct ethtool_drvinfo *info)
{
strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
@@ -799,7 +725,7 @@ static int fs_get_regs_len(struct net_device *dev)
}
static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
- void *p)
+ void *p)
{
struct fs_enet_private *fep = netdev_priv(dev);
unsigned long flags;
@@ -818,12 +744,14 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
static u32 fs_get_msglevel(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
+
return fep->msg_enable;
}
static void fs_set_msglevel(struct net_device *dev, u32 value)
{
struct fs_enet_private *fep = netdev_priv(dev);
+
fep->msg_enable = value;
}
@@ -865,6 +793,22 @@ static int fs_set_tunable(struct net_device *dev,
return ret;
}
+static int fs_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_set(fep->phylink, cmd);
+}
+
+static int fs_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_get(fep->phylink, cmd);
+}
+
static const struct ethtool_ops fs_ethtool_ops = {
.get_drvinfo = fs_get_drvinfo,
.get_regs_len = fs_get_regs_len,
@@ -874,14 +818,12 @@ static const struct ethtool_ops fs_ethtool_ops = {
.set_msglevel = fs_set_msglevel,
.get_regs = fs_get_regs,
.get_ts_info = ethtool_op_get_ts_info,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = fs_ethtool_get_link_ksettings,
+ .set_link_ksettings = fs_ethtool_set_link_ksettings,
.get_tunable = fs_get_tunable,
.set_tunable = fs_set_tunable,
};
-/**************************************************************************************/
-
#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(ops) ((ops) == &fs_fec_ops)
#else
@@ -894,7 +836,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
.ndo_start_xmit = fs_enet_start_xmit,
.ndo_tx_timeout = fs_timeout,
.ndo_set_rx_mode = fs_set_multicast_list,
- .ndo_eth_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = fs_eth_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -902,17 +844,23 @@ static const struct net_device_ops fs_enet_netdev_ops = {
#endif
};
+static const struct phylink_mac_ops fs_enet_phylink_mac_ops = {
+ .mac_config = fs_mac_config,
+ .mac_link_down = fs_mac_link_down,
+ .mac_link_up = fs_mac_link_up,
+};
+
static int fs_enet_probe(struct platform_device *ofdev)
{
+ int privsize, len, ret = -ENODEV;
+ struct fs_platform_info *fpi;
+ struct fs_enet_private *fep;
+ phy_interface_t phy_mode;
const struct fs_ops *ops;
struct net_device *ndev;
- struct fs_enet_private *fep;
- struct fs_platform_info *fpi;
+ struct phylink *phylink;
const u32 *data;
struct clk *clk;
- int err;
- const char *phy_connection_type;
- int privsize, len, ret = -ENODEV;
ops = device_get_match_data(&ofdev->dev);
if (!ops)
@@ -930,51 +878,36 @@ static int fs_enet_probe(struct platform_device *ofdev)
fpi->cp_command = *data;
}
+ ret = of_get_phy_mode(ofdev->dev.of_node, &phy_mode);
+ if (ret) {
+ /* For compatibility, if the mode isn't specified in DT,
+ * assume MII
+ */
+ phy_mode = PHY_INTERFACE_MODE_MII;
+ }
+
fpi->rx_ring = RX_RING_SIZE;
fpi->tx_ring = TX_RING_SIZE;
fpi->rx_copybreak = 240;
fpi->napi_weight = 17;
- fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
- if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
- err = of_phy_register_fixed_link(ofdev->dev.of_node);
- if (err)
- goto out_free_fpi;
-
- /* In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- fpi->phy_node = of_node_get(ofdev->dev.of_node);
- }
-
- if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
- phy_connection_type = of_get_property(ofdev->dev.of_node,
- "phy-connection-type", NULL);
- if (phy_connection_type && !strcmp("rmii", phy_connection_type))
- fpi->use_rmii = 1;
- }
/* make clock lookup non-fatal (the driver is shared among platforms),
* but require enable to succeed when a clock was specified/found,
* keep a reference to the clock upon successful acquisition
*/
- clk = devm_clk_get(&ofdev->dev, "per");
- if (!IS_ERR(clk)) {
- ret = clk_prepare_enable(clk);
- if (ret)
- goto out_deregister_fixed_link;
-
- fpi->clk_per = clk;
- }
+ clk = devm_clk_get_optional_enabled(&ofdev->dev, "per");
+ if (IS_ERR(clk))
+ goto out_free_fpi;
privsize = sizeof(*fep) +
- sizeof(struct sk_buff **) *
+ sizeof(struct sk_buff **) *
(fpi->rx_ring + fpi->tx_ring) +
sizeof(char) * fpi->tx_ring;
ndev = alloc_etherdev(privsize);
if (!ndev) {
ret = -ENOMEM;
- goto out_put;
+ goto out_free_fpi;
}
SET_NETDEV_DEV(ndev, &ofdev->dev);
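In the probe hunk above, devm_clk_get_optional_enabled() collapses the old lookup/enable/error-unwind dance into one devres-managed call: a missing "per" clock yields NULL (treated as success), a real failure yields an ERR_PTR, and disable/unprepare happen automatically on detach. That is why the clk_per field and the out_put unwind label disappear:

	/* Before: four steps plus manual unwind */
	clk = devm_clk_get(&ofdev->dev, "per");
	if (!IS_ERR(clk)) {
		ret = clk_prepare_enable(clk);
		if (ret)
			goto out_deregister_fixed_link;
		fpi->clk_per = clk;
	}

	/* After: one call; devres disables the clock on remove or failure */
	clk = devm_clk_get_optional_enabled(&ofdev->dev, "per");
	if (IS_ERR(clk))
		goto out_free_fpi;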
@@ -986,9 +919,29 @@ static int fs_enet_probe(struct platform_device *ofdev)
fep->fpi = fpi;
fep->ops = ops;
+ fep->phylink_config.dev = &ndev->dev;
+ fep->phylink_config.type = PHYLINK_NETDEV;
+ fep->phylink_config.mac_capabilities = MAC_10 | MAC_100;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ fep->phylink_config.supported_interfaces);
+
+ if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec"))
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ fep->phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&fep->phylink_config, dev_fwnode(fep->dev),
+ phy_mode, &fs_enet_phylink_mac_ops);
+ if (IS_ERR(phylink)) {
+ ret = PTR_ERR(phylink);
+ goto out_free_dev;
+ }
+
+ fep->phylink = phylink;
+
ret = fep->ops->setup_data(ndev);
if (ret)
- goto out_free_dev;
+ goto out_phylink;
fep->rx_skbuff = (struct sk_buff **)&fep[1];
fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
@@ -1018,8 +971,6 @@ static int fs_enet_probe(struct platform_device *ofdev)
ndev->ethtool_ops = &fs_ethtool_ops;
- netif_carrier_off(ndev);
-
ndev->features |= NETIF_F_SG;
ret = register_netdev(ndev);
@@ -1034,14 +985,10 @@ out_free_bd:
fep->ops->free_bd(ndev);
out_cleanup_data:
fep->ops->cleanup_data(ndev);
+out_phylink:
+ phylink_destroy(fep->phylink);
out_free_dev:
free_netdev(ndev);
-out_put:
- clk_disable_unprepare(fpi->clk_per);
-out_deregister_fixed_link:
- of_node_put(fpi->phy_node);
- if (of_phy_is_fixed_link(ofdev->dev.of_node))
- of_phy_deregister_fixed_link(ofdev->dev.of_node);
out_free_fpi:
kfree(fpi);
return ret;
@@ -1057,10 +1004,7 @@ static void fs_enet_remove(struct platform_device *ofdev)
fep->ops->free_bd(ndev);
fep->ops->cleanup_data(ndev);
dev_set_drvdata(fep->dev, NULL);
- of_node_put(fep->fpi->phy_node);
- clk_disable_unprepare(fep->fpi->clk_per);
- if (of_phy_is_fixed_link(ofdev->dev.of_node))
- of_phy_deregister_fixed_link(ofdev->dev.of_node);
+ phylink_destroy(fep->phylink);
free_netdev(ndev);
}
@@ -1114,9 +1058,9 @@ static struct platform_driver fs_enet_driver = {
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
- disable_irq(dev->irq);
- fs_enet_interrupt(dev->irq, dev);
- enable_irq(dev->irq);
+ disable_irq(dev->irq);
+ fs_enet_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
}
#endif
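Stepping back, the fs_enet phylink conversion follows the standard recipe, and the restart() op change in the mac-fcc/mac-fec/mac-scc files below is its direct consequence: speed and duplex now arrive as resolved parameters from mac_link_up() instead of being read from dev->phydev. The lifecycle, condensed from the hunks above:

	static const struct phylink_mac_ops fs_enet_phylink_mac_ops = {
		.mac_config	= fs_mac_config,	/* nothing to pre-program */
		.mac_link_down	= fs_mac_link_down,	/* ops->stop(ndev) */
		.mac_link_up	= fs_mac_link_up,	/* ops->restart(ndev,
							 * iface, speed, duplex) */
	};

	/* probe:  phylink_create(&cfg, fwnode, phy_mode, &fs_enet_phylink_mac_ops)
	 * open:   phylink_of_phy_connect() + phylink_start()
	 * close:  phylink_stop() + phylink_disconnect_phy()
	 * remove: phylink_destroy()
	 */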
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 21c07ac05225..36e4fcc29e36 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -3,11 +3,11 @@
#define FS_ENET_H
#include <linux/clk.h>
-#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_CPM1
@@ -77,8 +77,8 @@ struct fs_ops {
void (*free_bd)(struct net_device *dev);
void (*cleanup_data)(struct net_device *dev);
void (*set_multicast_list)(struct net_device *dev);
- void (*adjust_link)(struct net_device *dev);
- void (*restart)(struct net_device *dev);
+ void (*restart)(struct net_device *dev, phy_interface_t interface,
+ int speed, int duplex);
void (*stop)(struct net_device *dev);
void (*napi_clear_event)(struct net_device *dev);
void (*napi_enable)(struct net_device *dev);
@@ -93,14 +93,6 @@ struct fs_ops {
void (*tx_restart)(struct net_device *dev);
};
-struct phy_info {
- unsigned int id;
- const char *name;
- void (*startup) (struct net_device * dev);
- void (*shutdown) (struct net_device * dev);
- void (*ack_int) (struct net_device * dev);
-};
-
/* The FEC stores dest/src/type, data, and checksum for receive packets.
*/
#define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */
@@ -122,15 +114,9 @@ struct fs_platform_info {
u32 dpram_offset;
- struct device_node *phy_node;
-
int rx_ring, tx_ring; /* number of buffers on rx */
int rx_copybreak; /* limit we copy small frames */
int napi_weight; /* NAPI weight */
-
- int use_rmii; /* use RMII mode */
-
- struct clk *clk_per; /* 'per' clock for register access */
};
struct fs_enet_private {
@@ -154,14 +140,11 @@ struct fs_enet_private {
cbd_t __iomem *cur_rx;
cbd_t __iomem *cur_tx;
int tx_free;
- const struct phy_info *phy;
u32 msg_enable;
- struct mii_if_info mii_if;
- unsigned int last_mii_status;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
int interrupt;
- int oldduplex, oldspeed, oldlink; /* current settings */
-
/* event masks */
u32 ev_napi; /* mask of NAPI events */
u32 ev; /* event mask */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index e2ffac9eb2ad..be63293511d9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* FCC driver for Motorola MPC82xx (PQ2).
*
@@ -6,10 +7,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -25,7 +22,6 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
@@ -239,7 +235,8 @@ static void set_multicast_list(struct net_device *dev)
set_promiscuous_mode(dev);
}
-static void restart(struct net_device *dev)
+static void restart(struct net_device *dev, phy_interface_t interface,
+ int speed, int duplex)
{
struct fs_enet_private *fep = netdev_priv(dev);
const struct fs_platform_info *fpi = fep->fpi;
@@ -363,8 +360,8 @@ static void restart(struct net_device *dev)
fs_init_bds(dev);
/* adjust to speed (for RMII mode) */
- if (fpi->use_rmii) {
- if (dev->phydev->speed == 100)
+ if (interface == PHY_INTERFACE_MODE_RMII) {
+ if (speed == SPEED_100)
C8(fcccp, fcc_gfemr, 0x20);
else
S8(fcccp, fcc_gfemr, 0x20);
@@ -386,11 +383,11 @@ static void restart(struct net_device *dev)
W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC);
- if (fpi->use_rmii)
+ if (interface == PHY_INTERFACE_MODE_RMII)
S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
/* adjust to duplex mode */
- if (dev->phydev->duplex)
+ if (duplex == DUPLEX_FULL)
S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
else
C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index cdc89d83cf07..f2ecd20027cf 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Freescale Ethernet controllers
*
@@ -6,10 +7,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -26,7 +23,6 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
@@ -224,7 +220,8 @@ static void set_multicast_list(struct net_device *dev)
set_promiscuous_mode(dev);
}
-static void restart(struct net_device *dev)
+static void restart(struct net_device *dev, phy_interface_t interface,
+ int speed, int duplex)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
@@ -306,13 +303,13 @@ static void restart(struct net_device *dev)
* Only set MII/RMII mode - do not touch maximum frame length
* configured before.
*/
- FS(fecp, r_cntrl, fpi->use_rmii ?
- FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE);
+ FS(fecp, r_cntrl, interface == PHY_INTERFACE_MODE_RMII ?
+ FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE);
#endif
/*
* adjust to duplex mode
*/
- if (dev->phydev->duplex) {
+ if (duplex == DUPLEX_FULL) {
FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
} else {
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index a64cb6270515..6c97191649de 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
*
@@ -6,10 +7,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -25,7 +22,6 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
@@ -131,15 +127,14 @@ static int setup_data(struct net_device *dev)
static int allocate_bd(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- const struct fs_platform_info *fpi = fep->fpi;
+ struct fs_platform_info *fpi = fep->fpi;
- fep->ring_mem_addr = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) *
- sizeof(cbd_t), 8);
- if (IS_ERR_VALUE(fep->ring_mem_addr))
+ fpi->dpram_offset = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), 8);
+ if (IS_ERR_VALUE(fpi->dpram_offset))
return -ENOMEM;
- fep->ring_base = (void __iomem __force*)
- cpm_muram_addr(fep->ring_mem_addr);
+ fep->ring_base = cpm_muram_addr(fpi->dpram_offset);
return 0;
}
@@ -147,9 +142,10 @@ static int allocate_bd(struct net_device *dev)
static void free_bd(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
if (fep->ring_base)
- cpm_muram_free(fep->ring_mem_addr);
+ cpm_muram_free(fpi->dpram_offset);
}
static void cleanup_data(struct net_device *dev)
@@ -230,7 +226,8 @@ static void set_multicast_list(struct net_device *dev)
* change. This only happens when switching between half and full
* duplex.
*/
-static void restart(struct net_device *dev)
+static void restart(struct net_device *dev, phy_interface_t interface,
+ int speed, int duplex)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
@@ -247,9 +244,9 @@ static void restart(struct net_device *dev)
__fs_out8((u8 __iomem *)ep + i, 0);
/* point to bds */
- W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
+ W16(ep, sen_genscc.scc_rbase, fpi->dpram_offset);
W16(ep, sen_genscc.scc_tbase,
- fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);
+ fpi->dpram_offset + sizeof(cbd_t) * fpi->rx_ring);
/* Initialize function code registers for big-endian.
*/
@@ -341,7 +338,7 @@ static void restart(struct net_device *dev)
W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
/* Set full duplex mode if needed */
- if (dev->phydev->duplex)
+ if (duplex == DUPLEX_FULL)
S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
/* Restore multicast and promiscuous settings */
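[Note: the restart() rework in the two fs_enet hunks above has the shape of a phylink-style link-up path: MAC reconfiguration now consumes the interface mode, speed and duplex handed in by the caller instead of peeking at dev->phydev. A minimal sketch, assuming a phylink conversion is what drives the new parameters; the callback name and call site below are illustrative only:

static void fs_mac_link_up(struct phylink_config *config,
			   struct phy_device *phy, unsigned int mode,
			   phy_interface_t interface, int speed, int duplex,
			   bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);

	/* hypothetical call site for the reworked helper */
	restart(ndev, interface, speed, duplex);
}]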
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index f965a2329055..2e210a003558 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
*
@@ -6,10 +7,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 7bb69727952a..93d91e8ad0de 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
*
@@ -6,10 +7,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 2baef59f741d..ecb1703ea150 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -754,6 +754,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
err = of_get_ethdev_address(np, dev);
+ if (err == -EPROBE_DEFER)
+ goto err_grp_init;
if (err) {
eth_hw_addr_random(dev);
dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
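[Note: the gianfar hunk above distinguishes -EPROBE_DEFER from ordinary lookup failures: a deferral (for example an NVMEM MAC provider that has not bound yet) must abort and retry the probe rather than be papered over with a random address. The pattern, condensed:

err = of_get_ethdev_address(np, dev);
if (err == -EPROBE_DEFER)
	return err;			/* provider not ready; retry probe later */
if (err)
	eth_hw_addr_random(dev);	/* any other failure: fall back to random MAC */]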
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index f581402ad740..a99b95c4bcfb 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1455,12 +1455,8 @@ static int gfar_get_ts_info(struct net_device *dev,
struct device_node *ptp_node;
struct ptp_qoriq *ptp = NULL;
- info->phc_index = -1;
-
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
@@ -1478,9 +1474,7 @@ static int gfar_get_ts_info(struct net_device *dev,
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.c b/drivers/net/ethernet/fungible/funcore/fun_dev.c
index a7fbd4cd560a..ce97b76f9ae0 100644
--- a/drivers/net/ethernet/fungible/funcore/fun_dev.c
+++ b/drivers/net/ethernet/fungible/funcore/fun_dev.c
@@ -546,17 +546,14 @@ int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
unsigned int id0, enum fun_admin_bind_type type1,
unsigned int id1)
{
- struct {
- struct fun_admin_bind_req req;
- struct fun_admin_bind_entry entry[2];
- } cmd = {
- .req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND,
- sizeof(cmd)),
- .entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0),
- .entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1),
- };
+ DEFINE_RAW_FLEX(struct fun_admin_bind_req, cmd, entry, 2);
+
+ cmd->common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND,
+ __struct_size(cmd));
+ cmd->entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0);
+ cmd->entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1);
- return fun_submit_admin_sync_cmd(fdev, &cmd.req.common, NULL, 0, 0);
+ return fun_submit_admin_sync_cmd(fdev, &cmd->common, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(fun_bind);
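[Note: DEFINE_RAW_FLEX(), used above, declares an on-stack buffer sized for a flexible-array struct plus a fixed number of trailing elements, replacing the hand-rolled wrapper struct, and __struct_size() recovers the total size at compile time. A self-contained sketch with a made-up message type:

struct msg {
	u32 hdr_len;
	u32 entry[];			/* flexible array member */
};

static void demo(void)
{
	/* `m` is a struct msg * backed by stack storage for 2 entries */
	DEFINE_RAW_FLEX(struct msg, m, entry, 2);

	m->hdr_len = __struct_size(m);	/* sizeof(struct msg) + 2 * sizeof(u32) */
	m->entry[0] = 1;
	m->entry[1] = 2;
}]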
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
index 7f081e6e8c87..ba83dbf4ed22 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -1042,12 +1042,9 @@ static int fun_set_rxfh(struct net_device *netdev,
static int fun_get_ts_info(struct net_device *netdev,
struct kernel_ethtool_ts_info *info)
{
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
+ info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
return 0;
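[Note: the gianfar and funeth timestamping hunks above both drop SOF_TIMESTAMPING_RX_SOFTWARE, SOF_TIMESTAMPING_SOFTWARE and the phc_index = -1 default from the driver callbacks, presumably because the ethtool core now supplies those software-timestamping defaults itself. Under that assumption, a minimal callback reduces to the following (foo_ is a placeholder driver prefix):

static int foo_get_ts_info(struct net_device *dev,
			   struct kernel_ethtool_ts_info *info)
{
	/* core is assumed to fill in the software RX flags and phc_index = -1 */
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
	return 0;
}]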
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 84ac004d3953..301fa1ea4f51 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -784,6 +784,8 @@ struct gve_priv {
u32 adminq_verify_driver_compatibility_cnt;
u32 adminq_query_flow_rules_cnt;
u32 adminq_cfg_flow_rule_cnt;
+ u32 adminq_cfg_rss_cnt;
+ u32 adminq_query_rss_cnt;
/* Global stats */
u32 interface_up_cnt; /* count of times interface turned up since last reset */
@@ -831,6 +833,9 @@ struct gve_priv {
u32 num_flow_rules;
struct gve_flow_rules_cache flow_rules_cache;
+
+ u16 rss_key_size;
+ u16 rss_lut_size;
};
enum gve_service_task_flags_bit {
@@ -1148,7 +1153,6 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
int idx);
void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_alloc_rings_cfg *cfg);
-int gve_rx_alloc_rings(struct gve_priv *priv);
int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_gqi(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index c5bbc1b7524e..e44e8b139633 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -45,6 +45,7 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
struct gve_device_option_flow_steering **dev_op_flow_steering,
+ struct gve_device_option_rss_config **dev_op_rss_config,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
@@ -207,6 +208,23 @@ void gve_parse_device_option(struct gve_priv *priv,
"Flow Steering");
*dev_op_flow_steering = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_RSS_CONFIG:
+ if (option_length < sizeof(**dev_op_rss_config) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "RSS config",
+ (int)sizeof(**dev_op_rss_config),
+ GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_rss_config))
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "RSS config");
+ *dev_op_rss_config = (void *)(option + 1);
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -227,6 +245,7 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
struct gve_device_option_flow_steering **dev_op_flow_steering,
+ struct gve_device_option_rss_config **dev_op_rss_config,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
@@ -249,7 +268,8 @@ gve_process_device_options(struct gve_priv *priv,
dev_op_gqi_rda, dev_op_gqi_qpl,
dev_op_dqo_rda, dev_op_jumbo_frames,
dev_op_dqo_qpl, dev_op_buffer_sizes,
- dev_op_flow_steering, dev_op_modify_ring);
+ dev_op_flow_steering, dev_op_rss_config,
+ dev_op_modify_ring);
dev_opt = next_opt;
}
@@ -289,6 +309,8 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
priv->adminq_get_ptype_map_cnt = 0;
priv->adminq_query_flow_rules_cnt = 0;
priv->adminq_cfg_flow_rule_cnt = 0;
+ priv->adminq_cfg_rss_cnt = 0;
+ priv->adminq_query_rss_cnt = 0;
/* Setup Admin queue with the device */
if (priv->pdev->revision < 0x1) {
@@ -534,6 +556,12 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
case GVE_ADMINQ_CONFIGURE_FLOW_RULE:
priv->adminq_cfg_flow_rule_cnt++;
break;
+ case GVE_ADMINQ_CONFIGURE_RSS:
+ priv->adminq_cfg_rss_cnt++;
+ break;
+ case GVE_ADMINQ_QUERY_RSS:
+ priv->adminq_query_rss_cnt++;
+ break;
default:
dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
}
@@ -867,6 +895,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
*dev_op_buffer_sizes,
const struct gve_device_option_flow_steering
*dev_op_flow_steering,
+ const struct gve_device_option_rss_config
+ *dev_op_rss_config,
const struct gve_device_option_modify_ring
*dev_op_modify_ring)
{
@@ -931,6 +961,14 @@ static void gve_enable_supported_features(struct gve_priv *priv,
priv->max_flow_rules);
}
}
+
+ if (dev_op_rss_config &&
+ (supported_features_mask & GVE_SUP_RSS_CONFIG_MASK)) {
+ priv->rss_key_size =
+ be16_to_cpu(dev_op_rss_config->hash_key_size);
+ priv->rss_lut_size =
+ be16_to_cpu(dev_op_rss_config->hash_lut_size);
+ }
}
int gve_adminq_describe_device(struct gve_priv *priv)
@@ -939,6 +977,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
+ struct gve_device_option_rss_config *dev_op_rss_config = NULL;
struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
@@ -973,6 +1012,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
&dev_op_jumbo_frames, &dev_op_dqo_qpl,
&dev_op_buffer_sizes,
&dev_op_flow_steering,
+ &dev_op_rss_config,
&dev_op_modify_ring);
if (err)
goto free_device_descriptor;
@@ -1035,7 +1075,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
gve_enable_supported_features(priv, supported_features_mask,
dev_op_jumbo_frames, dev_op_dqo_qpl,
dev_op_buffer_sizes, dev_op_flow_steering,
- dev_op_modify_ring);
+ dev_op_rss_config, dev_op_modify_ring);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
@@ -1248,6 +1288,81 @@ int gve_adminq_reset_flow_rules(struct gve_priv *priv)
return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
}
+int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh)
+{
+ dma_addr_t lut_bus = 0, key_bus = 0;
+ u16 key_size = 0, lut_size = 0;
+ union gve_adminq_command cmd;
+ __be32 *lut = NULL;
+ u8 hash_alg = 0;
+ u8 *key = NULL;
+ int err = 0;
+ u16 i;
+
+ switch (rxfh->hfunc) {
+ case ETH_RSS_HASH_NO_CHANGE:
+ break;
+ case ETH_RSS_HASH_TOP:
+ hash_alg = ETH_RSS_HASH_TOP;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (rxfh->indir) {
+ lut_size = priv->rss_lut_size;
+ lut = dma_alloc_coherent(&priv->pdev->dev,
+ lut_size * sizeof(*lut),
+ &lut_bus, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->rss_lut_size; i++)
+ lut[i] = cpu_to_be32(rxfh->indir[i]);
+ }
+
+ if (rxfh->key) {
+ key_size = priv->rss_key_size;
+ key = dma_alloc_coherent(&priv->pdev->dev,
+ key_size, &key_bus, GFP_KERNEL);
+ if (!key) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(key, rxfh->key, key_size);
+ }
+
+ /* Zero-valued fields in the cmd.configure_rss instruct the device to
+ * not update those fields.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_RSS);
+ cmd.configure_rss = (struct gve_adminq_configure_rss) {
+ .hash_types = cpu_to_be16(BIT(GVE_RSS_HASH_TCPV4) |
+ BIT(GVE_RSS_HASH_UDPV4) |
+ BIT(GVE_RSS_HASH_TCPV6) |
+ BIT(GVE_RSS_HASH_UDPV6)),
+ .hash_alg = hash_alg,
+ .hash_key_size = cpu_to_be16(key_size),
+ .hash_lut_size = cpu_to_be16(lut_size),
+ .hash_key_addr = cpu_to_be64(key_bus),
+ .hash_lut_addr = cpu_to_be64(lut_bus),
+ };
+
+ err = gve_adminq_execute_cmd(priv, &cmd);
+
+out:
+ if (lut)
+ dma_free_coherent(&priv->pdev->dev,
+ lut_size * sizeof(*lut),
+ lut, lut_bus);
+ if (key)
+ dma_free_coherent(&priv->pdev->dev,
+ key_size, key, key_bus);
+ return err;
+}
+
/* In the dma memory that the driver allocated for the device to query the flow rules, the device
* will first write it with a struct of gve_query_flow_rules_descriptor. Next to it, the device
* will write an array of rules or rule ids with the count that specified in the descriptor.
@@ -1325,3 +1440,66 @@ out:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
return err;
}
+
+static int gve_adminq_process_rss_query(struct gve_priv *priv,
+ struct gve_query_rss_descriptor *descriptor,
+ struct ethtool_rxfh_param *rxfh)
+{
+ u32 total_memory_length;
+ u16 hash_lut_length;
+ void *rss_info_addr;
+ __be32 *lut;
+ u16 i;
+
+ total_memory_length = be32_to_cpu(descriptor->total_length);
+ hash_lut_length = priv->rss_lut_size * sizeof(*rxfh->indir);
+
+ if (sizeof(*descriptor) + priv->rss_key_size + hash_lut_length != total_memory_length) {
+ dev_err(&priv->dev->dev,
+ "rss query desc from device has invalid length parameter.\n");
+ return -EINVAL;
+ }
+
+ rxfh->hfunc = descriptor->hash_alg;
+
+ rss_info_addr = (void *)(descriptor + 1);
+ if (rxfh->key)
+ memcpy(rxfh->key, rss_info_addr, priv->rss_key_size);
+
+ rss_info_addr += priv->rss_key_size;
+ lut = (__be32 *)rss_info_addr;
+ if (rxfh->indir) {
+ for (i = 0; i < priv->rss_lut_size; i++)
+ rxfh->indir[i] = be32_to_cpu(lut[i]);
+ }
+
+ return 0;
+}
+
+int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_query_rss_descriptor *descriptor;
+ union gve_adminq_command cmd;
+ dma_addr_t descriptor_bus;
+ int err = 0;
+
+ descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL, &descriptor_bus);
+ if (!descriptor)
+ return -ENOMEM;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_QUERY_RSS);
+ cmd.query_rss = (struct gve_adminq_query_rss) {
+ .available_length = cpu_to_be64(GVE_ADMINQ_BUFFER_SIZE),
+ .rss_descriptor_addr = cpu_to_be64(descriptor_bus),
+ };
+ err = gve_adminq_execute_cmd(priv, &cmd);
+ if (err)
+ goto out;
+
+ err = gve_adminq_process_rss_query(priv, descriptor, rxfh);
+
+out:
+ dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
+ return err;
+}
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index ed1370c9b197..863683de9694 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -20,12 +20,14 @@ enum gve_adminq_opcodes {
GVE_ADMINQ_DESTROY_TX_QUEUE = 0x7,
GVE_ADMINQ_DESTROY_RX_QUEUE = 0x8,
GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES = 0x9,
+ GVE_ADMINQ_CONFIGURE_RSS = 0xA,
GVE_ADMINQ_SET_DRIVER_PARAMETER = 0xB,
GVE_ADMINQ_REPORT_STATS = 0xC,
GVE_ADMINQ_REPORT_LINK_SPEED = 0xD,
GVE_ADMINQ_GET_PTYPE_MAP = 0xE,
GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF,
GVE_ADMINQ_QUERY_FLOW_RULES = 0x10,
+ GVE_ADMINQ_QUERY_RSS = 0x12,
/* For commands that are larger than 56 bytes */
GVE_ADMINQ_EXTENDED_COMMAND = 0xFF,
@@ -164,6 +166,14 @@ struct gve_device_option_flow_steering {
static_assert(sizeof(struct gve_device_option_flow_steering) == 12);
+struct gve_device_option_rss_config {
+ __be32 supported_features_mask;
+ __be16 hash_key_size;
+ __be16 hash_lut_size;
+};
+
+static_assert(sizeof(struct gve_device_option_rss_config) == 8);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -182,6 +192,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
GVE_DEV_OPT_ID_FLOW_STEERING = 0xb,
+ GVE_DEV_OPT_ID_RSS_CONFIG = 0xe,
};
enum gve_dev_opt_req_feat_mask {
@@ -194,6 +205,7 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG = 0x0,
};
enum gve_sup_feature_mask {
@@ -201,6 +213,7 @@ enum gve_sup_feature_mask {
GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
GVE_SUP_FLOW_STEERING_MASK = 1 << 5,
+ GVE_SUP_RSS_CONFIG_MASK = 1 << 7,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -214,6 +227,7 @@ enum gve_driver_capbility {
gve_driver_capability_dqo_rda = 3,
gve_driver_capability_alt_miss_compl = 4,
gve_driver_capability_flexible_buffer_size = 5,
+ gve_driver_capability_flexible_rss_size = 6,
};
#define GVE_CAP1(a) BIT((int)a)
@@ -226,7 +240,8 @@ enum gve_driver_capbility {
GVE_CAP1(gve_driver_capability_gqi_rda) | \
GVE_CAP1(gve_driver_capability_dqo_rda) | \
GVE_CAP1(gve_driver_capability_alt_miss_compl) | \
- GVE_CAP1(gve_driver_capability_flexible_buffer_size))
+ GVE_CAP1(gve_driver_capability_flexible_buffer_size) | \
+ GVE_CAP1(gve_driver_capability_flexible_rss_size))
#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
@@ -509,6 +524,44 @@ struct gve_adminq_query_flow_rules {
static_assert(sizeof(struct gve_adminq_query_flow_rules) == 24);
+enum gve_rss_hash_type {
+ GVE_RSS_HASH_IPV4,
+ GVE_RSS_HASH_TCPV4,
+ GVE_RSS_HASH_IPV6,
+ GVE_RSS_HASH_IPV6_EX,
+ GVE_RSS_HASH_TCPV6,
+ GVE_RSS_HASH_TCPV6_EX,
+ GVE_RSS_HASH_UDPV4,
+ GVE_RSS_HASH_UDPV6,
+ GVE_RSS_HASH_UDPV6_EX,
+};
+
+struct gve_adminq_configure_rss {
+ __be16 hash_types;
+ u8 hash_alg;
+ u8 reserved;
+ __be16 hash_key_size;
+ __be16 hash_lut_size;
+ __be64 hash_key_addr;
+ __be64 hash_lut_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_configure_rss) == 24);
+
+struct gve_query_rss_descriptor {
+ __be32 total_length;
+ __be16 hash_types;
+ u8 hash_alg;
+ u8 reserved;
+};
+
+struct gve_adminq_query_rss {
+ __be64 available_length;
+ __be64 rss_descriptor_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_query_rss) == 16);
+
union gve_adminq_command {
struct {
__be32 opcode;
@@ -530,6 +583,8 @@ union gve_adminq_command {
struct gve_adminq_verify_driver_compatibility
verify_driver_compatibility;
struct gve_adminq_query_flow_rules query_flow_rules;
+ struct gve_adminq_configure_rss configure_rss;
+ struct gve_adminq_query_rss query_rss;
struct gve_adminq_extended_command extended_command;
};
};
@@ -568,6 +623,8 @@ int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule
int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc);
int gve_adminq_reset_flow_rules(struct gve_priv *priv);
int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc);
+int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
+int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
struct gve_ptype_lut;
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 5a8b490ab3ad..bdfc6e77b2af 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -75,7 +75,8 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
"adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt",
- "adminq_query_flow_rules", "adminq_cfg_flow_rule",
+ "adminq_query_flow_rules", "adminq_cfg_flow_rule", "adminq_cfg_rss_cnt",
+ "adminq_query_rss_cnt",
};
static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
@@ -453,6 +454,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = priv->adminq_get_ptype_map_cnt;
data[i++] = priv->adminq_query_flow_rules_cnt;
data[i++] = priv->adminq_cfg_flow_rule_cnt;
+ data[i++] = priv->adminq_cfg_rss_cnt;
+ data[i++] = priv->adminq_query_rss_cnt;
}
static void gve_get_channels(struct net_device *netdev,
@@ -838,6 +841,41 @@ static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u
return err;
}
+static u32 gve_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ return priv->rss_key_size;
+}
+
+static u32 gve_get_rxfh_indir_size(struct net_device *netdev)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ return priv->rss_lut_size;
+}
+
+static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ if (!priv->rss_key_size || !priv->rss_lut_size)
+ return -EOPNOTSUPP;
+
+ return gve_adminq_query_rss_config(priv, rxfh);
+}
+
+static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ if (!priv->rss_key_size || !priv->rss_lut_size)
+ return -EOPNOTSUPP;
+
+ return gve_adminq_configure_rss(priv, rxfh);
+}
+
const struct ethtool_ops gve_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
@@ -851,6 +889,10 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_channels = gve_get_channels,
.set_rxnfc = gve_set_rxnfc,
.get_rxnfc = gve_get_rxnfc,
+ .get_rxfh_indir_size = gve_get_rxfh_indir_size,
+ .get_rxfh_key_size = gve_get_rxfh_key_size,
+ .get_rxfh = gve_get_rxfh,
+ .set_rxfh = gve_set_rxfh,
.get_link = ethtool_op_get_link,
.get_coalesce = gve_get_coalesce,
.set_coalesce = gve_set_coalesce,
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index b91e7a06b97f..beb815e5289b 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -947,6 +947,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
priv->tx_coalesce_timer.function = tx_done;
priv->map = syscon_node_to_regmap(arg.np);
+ of_node_put(arg.np);
if (IS_ERR(priv->map)) {
dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
ret = PTR_ERR(priv->map);
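[Note: the one-line hip04 fix above closes a device-node leak: a phandle lookup returns arg.np with an elevated refcount that syscon_node_to_regmap() does not consume, so the caller must drop it once the regmap is resolved. The general shape:

struct of_phandle_args arg;	/* filled by a prior phandle lookup */
struct regmap *map;

map = syscon_node_to_regmap(arg.np);
of_node_put(arg.np);		/* drop the reference the lookup took */
if (IS_ERR(map))
	return PTR_ERR(map);]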
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index f75668c47935..58baac7103b3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -734,7 +734,7 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
return -ENODATA;
phy = get_phy_device(mdio, addr, is_c45);
- if (!phy || IS_ERR(phy))
+ if (IS_ERR_OR_NULL(phy))
return -EIO;
phy->irq = mdio->irq[addr];
@@ -933,6 +933,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
mac_cb->cpld_ctrl = NULL;
} else {
syscon = syscon_node_to_regmap(cpld_args.np);
+ of_node_put(cpld_args.np);
if (IS_ERR_OR_NULL(syscon)) {
dev_dbg(mac_cb->dev, "no cpld-syscon found!\n");
mac_cb->cpld_ctrl = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index a5fc0209d628..4cbc4d069a1f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -5724,6 +5724,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ hns3_nic_net_stop(netdev);
+
if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index e132c2f09560..cc7f46c0b35f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1598,8 +1598,7 @@ static void hclge_query_reg_info_of_ssu(struct hclge_dev *hdev)
{
u32 loop_para[HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE] = {0};
struct hclge_mod_reg_common_msg msg;
- u8 i, j, num;
- u32 loop_time;
+ u8 i, j, num, loop_time;
num = ARRAY_SIZE(hclge_ssu_reg_common_msg);
for (i = 0; i < num; i++) {
@@ -1609,7 +1608,8 @@ static void hclge_query_reg_info_of_ssu(struct hclge_dev *hdev)
loop_time = 1;
loop_para[0] = 0;
if (msg.need_para) {
- loop_time = hdev->ae_dev->dev_specs.tnl_num;
+ loop_time = min(hdev->ae_dev->dev_specs.tnl_num,
+ HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE);
for (j = 0; j < loop_time; j++)
loop_para[j] = j + 1;
}
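[Note: the hclge_err change above clamps a firmware-reported loop count to the size of the on-stack parameter array, so a misbehaving device can no longer index past loop_para[]. The same guard in isolation, with hypothetical names:

u32 buf[8];
u8 j, n = min_t(u8, fw_reported_cnt, ARRAY_SIZE(buf));

for (j = 0; j < n; j++)
	buf[j] = j + 1;		/* never writes past the array */]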
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 82574ce0194f..bd86efd92a5a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -13,8 +13,9 @@
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
-#include <net/ipv6.h>
+
#include <net/rtnetlink.h>
+
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
@@ -2653,8 +2654,17 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
+
+ if (ret)
+ return ret;
+
+ hdev->hw.mac.req_speed = speed;
+ hdev->hw.mac.req_duplex = duplex;
- return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
+ return 0;
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
@@ -2956,17 +2966,20 @@ static int hclge_mac_init(struct hclge_dev *hdev)
if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
- ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
- hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
- if (ret)
- return ret;
-
if (hdev->hw.mac.support_autoneg) {
ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
if (ret)
return ret;
}
+ if (!hdev->hw.mac.autoneg) {
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.req_speed,
+ hdev->hw.mac.req_duplex,
+ hdev->hw.mac.lane_num);
+ if (ret)
+ return ret;
+ }
+
mac->link = 0;
if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
@@ -6278,15 +6291,15 @@ static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs,
static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule, u8 ip_proto)
{
- be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
+ fs->h_u.tcp_ip6_spec.ip6src);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
+ fs->m_u.tcp_ip6_spec.ip6src);
- be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
+ fs->h_u.tcp_ip6_spec.ip6dst);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
+ fs->m_u.tcp_ip6_spec.ip6dst);
rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
@@ -6307,15 +6320,15 @@ static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs,
static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule)
{
- be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
+ fs->h_u.usr_ip6_spec.ip6src);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
+ fs->m_u.usr_ip6_spec.ip6src);
- be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
+ fs->h_u.usr_ip6_spec.ip6dst);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
+ fs->m_u.usr_ip6_spec.ip6dst);
rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
@@ -6744,21 +6757,19 @@ static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
struct ethtool_tcpip6_spec *spec,
struct ethtool_tcpip6_spec *spec_mask)
{
- cpu_to_be32_array(spec->ip6src,
- rule->tuples.src_ip, IPV6_SIZE);
- cpu_to_be32_array(spec->ip6dst,
- rule->tuples.dst_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip);
+ ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
else
- cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
- IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6src,
+ rule->tuples_mask.src_ip);
if (rule->unused_tuple & BIT(INNER_DST_IP))
memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
else
- cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
- IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6dst,
+ rule->tuples_mask.dst_ip);
spec->tclass = rule->tuples.ip_tos;
spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
@@ -6777,19 +6788,19 @@ static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
struct ethtool_usrip6_spec *spec,
struct ethtool_usrip6_spec *spec_mask)
{
- cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
- cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip);
+ ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
else
- cpu_to_be32_array(spec_mask->ip6src,
- rule->tuples_mask.src_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6src,
+ rule->tuples_mask.src_ip);
if (rule->unused_tuple & BIT(INNER_DST_IP))
memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
else
- cpu_to_be32_array(spec_mask->ip6dst,
- rule->tuples_mask.dst_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6dst,
+ rule->tuples_mask.dst_ip);
spec->tclass = rule->tuples.ip_tos;
spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
@@ -7007,7 +7018,7 @@ static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
} else {
int i;
- for (i = 0; i < IPV6_SIZE; i++) {
+ for (i = 0; i < IPV6_ADDR_WORDS; i++) {
tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
}
@@ -7262,14 +7273,14 @@ static int hclge_get_cls_key_ip(const struct flow_rule *flow,
struct flow_match_ipv6_addrs match;
flow_rule_match_ipv6_addrs(flow, &match);
- be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip,
- match.mask->src.s6_addr32, IPV6_SIZE);
- be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip,
- match.mask->dst.s6_addr32, IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
+ match.key->src.s6_addr32);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
+ match.mask->src.s6_addr32);
+ ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
+ match.key->dst.s6_addr32);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
+ match.mask->dst.s6_addr32);
} else {
rule->unused_tuple |= BIT(INNER_SRC_IP);
rule->unused_tuple |= BIT(INNER_DST_IP);
@@ -11444,7 +11455,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_free_irq_vectors(pdev);
- pci_release_mem_regions(pdev);
+ pci_release_regions(pdev);
pci_disable_device(pdev);
}
@@ -11516,8 +11527,8 @@ static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
hdev->reset_type = HNAE3_NONE_RESET;
- clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- up(&hdev->reset_sem);
+ if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ up(&hdev->reset_sem);
}
static void hclge_clear_resetting_state(struct hclge_dev *hdev)
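[Note: the reset-done change above (and its VF twin further down) makes semaphore release idempotent: only the caller that actually clears the RST_HANDLING bit performs the up(), so a duplicated completion cannot unbalance reset_sem. Reduced to its essence:

/* at most one caller observes the bit set and releases the semaphore */
if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
	up(&hdev->reset_sem);]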
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index b5178b0f88b3..b9fc719880bb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -8,7 +8,9 @@
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/kfifo.h>
+
#include <net/devlink.h>
+#include <net/ipv6.h>
#include "hclge_cmd.h"
#include "hclge_ptp.h"
@@ -718,15 +720,15 @@ struct hclge_fd_cfg {
};
#define IPV4_INDEX 3
-#define IPV6_SIZE 4
+
struct hclge_fd_rule_tuples {
u8 src_mac[ETH_ALEN];
u8 dst_mac[ETH_ALEN];
/* Be compatible for ip address of both ipv4 and ipv6.
* For ipv4 address, we store it in src/dst_ip[3].
*/
- u32 src_ip[IPV6_SIZE];
- u32 dst_ip[IPV6_SIZE];
+ u32 src_ip[IPV6_ADDR_WORDS];
+ u32 dst_ip[IPV6_ADDR_WORDS];
u16 src_port;
u16 dst_port;
u16 vlan_tag1;
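[Note: the IPV6_SIZE-to-IPV6_ADDR_WORDS conversion above leans on address-sized wrappers around the byte-order array helpers. A sketch of what those wrappers are assumed to look like in <net/ipv6.h>, with IPV6_ADDR_WORDS == 4:

static inline void ipv6_addr_be32_to_cpu(u32 *dst, const __be32 *src)
{
	be32_to_cpu_array(dst, src, IPV6_ADDR_WORDS);
}

static inline void ipv6_addr_cpu_to_be32(__be32 *dst, const u32 *src)
{
	cpu_to_be32_array(dst, src, IPV6_ADDR_WORDS);
}]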
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 85fb11de43a1..80079657afeb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -191,6 +191,9 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
if (ret)
netdev_err(netdev, "failed to adjust link.\n");
+ hdev->hw.mac.req_speed = (u32)speed;
+ hdev->hw.mac.req_duplex = (u8)duplex;
+
ret = hclge_cfg_flowctrl(hdev);
if (ret)
netdev_err(netdev, "failed to configure flow control.\n");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 5fff8ed388f8..5505caea88e9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -389,16 +389,12 @@ int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
}
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (hdev->ptp->clock)
info->phc_index = ptp_clock_index(hdev->ptp->clock);
- else
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 3735d2fed11f..094a7c7b5592 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1747,8 +1747,8 @@ static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
ret);
hdev->reset_type = HNAE3_NONE_RESET;
- clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
- up(&hdev->reset_sem);
+ if (test_and_clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ up(&hdev->reset_sem);
}
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
index 65b9dcd38137..6db415d8b917 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -134,17 +134,17 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
reg += hclgevf_reg_get_header(reg);
/* fetching per-VF registers values from VF PCIe register space */
- reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
+ reg_um = ARRAY_SIZE(cmdq_reg_addr_list);
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_um, reg);
for (i = 0; i < reg_um; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
- reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
+ reg_um = ARRAY_SIZE(common_reg_addr_list);
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_um, reg);
for (i = 0; i < reg_um; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
- reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
+ reg_um = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < hdev->num_tqps; j++) {
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
for (i = 0; i < reg_um; i++)
@@ -153,7 +153,7 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
HCLGEVF_RING_REG_OFFSET * j);
}
- reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
+ reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
for (j = 0; j < hdev->num_msi_used - 1; j++) {
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR, reg_um, reg);
for (i = 0; i < reg_um; i++)
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index ed73707176c1..8a047145f0c5 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -575,6 +575,7 @@ static int hns_mdio_probe(struct platform_device *pdev)
MDIO_SC_RESET_ST;
}
}
+ of_node_put(reg_args.np);
} else {
dev_warn(&pdev->dev, "find syscon ret = %#x\n", ret);
mdio_dev->subctrl_vbase = NULL;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index 0304f03d4093..c559dd4291d3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -1471,7 +1471,6 @@ static void hinic_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
- char *p = (char *)data;
u16 i, j;
switch (stringset) {
@@ -1479,31 +1478,19 @@ static void hinic_get_strings(struct net_device *netdev,
memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
return;
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++) {
- memcpy(p, hinic_function_stats[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++)
+ ethtool_puts(&data, hinic_function_stats[i].name);
- for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++) {
- memcpy(p, hinic_port_stats[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++)
+ ethtool_puts(&data, hinic_port_stats[i].name);
- for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++) {
- sprintf(p, hinic_tx_queue_stats[j].name, i);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < nic_dev->num_qps; i++)
+ for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++)
+ ethtool_sprintf(&data, hinic_tx_queue_stats[j].name, i);
- for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++) {
- sprintf(p, hinic_rx_queue_stats[j].name, i);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < nic_dev->num_qps; i++)
+ for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++)
+ ethtool_sprintf(&data, hinic_rx_queue_stats[j].name, i);
return;
default:
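[Note: the hinic conversion above swaps manual memcpy/sprintf pointer arithmetic for the ethtool string helpers, each of which advances the cursor by ETH_GSTRING_LEN per entry. Condensed form; foo_stats and n_queues are placeholders:

static void foo_fill_strings(u8 *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(foo_stats); i++)
		ethtool_puts(&data, foo_stats[i].name);		/* fixed names */
	for (i = 0; i < n_queues; i++)
		ethtool_sprintf(&data, "rxq%u_packets", i);	/* formatted names */
}]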
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 1e29e5c9a2df..c41c3f1cc506 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3063,14 +3063,13 @@ static void ehea_shutdown_single_port(struct ehea_port *port)
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
struct device_node *lhea_dn;
- struct device_node *eth_dn = NULL;
+ struct device_node *eth_dn;
const u32 *dn_log_port_id;
int i = 0;
lhea_dn = adapter->ofdev->dev.of_node;
- while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
-
+ for_each_child_of_node(lhea_dn, eth_dn) {
dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
NULL);
if (!dn_log_port_id) {
@@ -3102,12 +3101,11 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
u32 logical_port_id)
{
struct device_node *lhea_dn;
- struct device_node *eth_dn = NULL;
+ struct device_node *eth_dn;
const u32 *dn_log_port_id;
lhea_dn = adapter->ofdev->dev.of_node;
- while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
-
+ for_each_child_of_node(lhea_dn, eth_dn) {
dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
NULL);
if (dn_log_port_id)
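[Note: both ehea loops above move from open-coded of_get_next_child() iteration to for_each_child_of_node(), which handles the per-iteration get/put automatically; the one thing to remember is that leaving the loop early (returning the node, as ehea_get_eth_dn() does) leaves the caller holding a reference on the current child. Generic shape, with is_wanted() a placeholder:

struct device_node *child;

for_each_child_of_node(parent, child) {
	if (is_wanted(child))
		return child;	/* early exit: reference kept for the caller */
}
return NULL;			/* normal exit: no reference held */]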
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index a19d098f2e2b..dac570f3c110 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -32,7 +32,6 @@
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
-#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -96,11 +95,6 @@ MODULE_LICENSE("GPL");
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);
-/* This is the wait queue used to wait on any event related to probe, that
- * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
- */
-static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
-
/* Having stable interface names is a doomed idea. However, it would be nice
* if we didn't have completely random interface names at boot too :-) It's
* just a matter of making everybody's life easier. Since we are doing
@@ -116,9 +110,6 @@ static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
#define EMAC_BOOT_LIST_SIZE 4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
-/* How long should I wait for dependent devices ? */
-#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
-
/* I don't want to litter system log with timeout errors
* when we have brain-damaged PHY.
*/
@@ -418,8 +409,8 @@ do_retry:
static void emac_hash_mc(struct emac_instance *dev)
{
+ u32 __iomem *gaht_base = emac_gaht_base(dev);
const int regs = EMAC_XAHT_REGS(dev);
- u32 *gaht_base = emac_gaht_base(dev);
u32 gaht_temp[EMAC_XAHT_MAX_REGS];
struct netdev_hw_addr *ha;
int i;
@@ -973,8 +964,6 @@ static void __emac_set_multicast_list(struct emac_instance *dev)
* we need is just to stop RX channel. This seems to work on all
* tested SoCs. --ebs
*
- * If we need the full reset, we might just trigger the workqueue
- * and do it async... a bit nasty but should work --BenH
*/
dev->mcast_pending = 0;
emac_rx_disable(dev);
@@ -1228,18 +1217,10 @@ static void emac_print_link_status(struct emac_instance *dev)
static int emac_open(struct net_device *ndev)
{
struct emac_instance *dev = netdev_priv(ndev);
- int err, i;
+ int i;
DBG(dev, "open" NL);
- /* Setup error IRQ handler */
- err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
- if (err) {
- printk(KERN_ERR "%s: failed to request IRQ %d\n",
- ndev->name, dev->emac_irq);
- return err;
- }
-
/* Allocate RX ring */
for (i = 0; i < NUM_RX_BUFF; ++i)
if (emac_alloc_rx_skb(dev, i)) {
@@ -1293,8 +1274,6 @@ static int emac_open(struct net_device *ndev)
return 0;
oom:
emac_clean_rx_ring(dev);
- free_irq(dev->emac_irq, dev);
-
return -ENOMEM;
}
@@ -1408,8 +1387,6 @@ static int emac_close(struct net_device *ndev)
emac_clean_tx_ring(dev);
emac_clean_rx_ring(dev);
- free_irq(dev->emac_irq, dev);
-
netif_carrier_off(ndev);
return 0;
@@ -2390,7 +2367,9 @@ static int emac_check_deps(struct emac_instance *dev,
if (deps[i].drvdata != NULL)
there++;
}
- return there == EMAC_DEP_COUNT;
+ if (there != EMAC_DEP_COUNT)
+ return -EPROBE_DEFER;
+ return 0;
}
static void emac_put_deps(struct emac_instance *dev)
@@ -2402,19 +2381,6 @@ static void emac_put_deps(struct emac_instance *dev)
platform_device_put(dev->tah_dev);
}
-static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
- void *data)
-{
- /* We are only intereted in device addition */
- if (action == BUS_NOTIFY_BOUND_DRIVER)
- wake_up_all(&emac_probe_wait);
- return 0;
-}
-
-static struct notifier_block emac_of_bus_notifier = {
- .notifier_call = emac_of_bus_notify
-};
-
static int emac_wait_deps(struct emac_instance *dev)
{
struct emac_depentry deps[EMAC_DEP_COUNT];
@@ -2431,18 +2397,13 @@ static int emac_wait_deps(struct emac_instance *dev)
deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
if (dev->blist && dev->blist > emac_boot_list)
deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
- bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
- wait_event_timeout(emac_probe_wait,
- emac_check_deps(dev, deps),
- EMAC_PROBE_DEP_TIMEOUT);
- bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
- err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
+ err = emac_check_deps(dev, deps);
for (i = 0; i < EMAC_DEP_COUNT; i++) {
of_node_put(deps[i].node);
if (err)
platform_device_put(deps[i].ofdev);
}
- if (err == 0) {
+ if (!err) {
dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
@@ -2456,22 +2417,21 @@ static int emac_wait_deps(struct emac_instance *dev)
static int emac_read_uint_prop(struct device_node *np, const char *name,
u32 *val, int fatal)
{
- int len;
- const u32 *prop = of_get_property(np, name, &len);
- if (prop == NULL || len < sizeof(u32)) {
+ int err;
+
+ err = of_property_read_u32(np, name, val);
+ if (err) {
if (fatal)
- printk(KERN_ERR "%pOF: missing %s property\n",
- np, name);
- return -ENODEV;
+ pr_err("%pOF: missing %s property", np, name);
+ return err;
}
- *val = *prop;
return 0;
}
static void emac_adjust_link(struct net_device *ndev)
{
struct emac_instance *dev = netdev_priv(ndev);
- struct phy_device *phy = dev->phy_dev;
+ struct phy_device *phy = ndev->phydev;
dev->phy.autoneg = phy->autoneg;
dev->phy.speed = phy->speed;
@@ -2522,22 +2482,20 @@ static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
{
struct net_device *ndev = phy->dev;
- struct emac_instance *dev = netdev_priv(ndev);
phy->autoneg = AUTONEG_ENABLE;
phy->advertising = advertise;
- return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
+ return emac_mdio_phy_start_aneg(phy, ndev->phydev);
}
static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
{
struct net_device *ndev = phy->dev;
- struct emac_instance *dev = netdev_priv(ndev);
phy->autoneg = AUTONEG_DISABLE;
phy->speed = speed;
phy->duplex = fd;
- return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
+ return emac_mdio_phy_start_aneg(phy, ndev->phydev);
}
static int emac_mdio_poll_link(struct mii_phy *phy)
@@ -2546,20 +2504,19 @@ static int emac_mdio_poll_link(struct mii_phy *phy)
struct emac_instance *dev = netdev_priv(ndev);
int res;
- res = phy_read_status(dev->phy_dev);
+ res = phy_read_status(ndev->phydev);
if (res) {
dev_err(&dev->ofdev->dev, "link update failed (%d).", res);
return ethtool_op_get_link(ndev);
}
- return dev->phy_dev->link;
+ return ndev->phydev->link;
}
static int emac_mdio_read_link(struct mii_phy *phy)
{
struct net_device *ndev = phy->dev;
- struct emac_instance *dev = netdev_priv(ndev);
- struct phy_device *phy_dev = dev->phy_dev;
+ struct phy_device *phy_dev = ndev->phydev;
int res;
res = phy_read_status(phy_dev);
@@ -2576,10 +2533,9 @@ static int emac_mdio_read_link(struct mii_phy *phy)
static int emac_mdio_init_phy(struct mii_phy *phy)
{
struct net_device *ndev = phy->dev;
- struct emac_instance *dev = netdev_priv(ndev);
- phy_start(dev->phy_dev);
- return phy_init_hw(dev->phy_dev);
+ phy_start(ndev->phydev);
+ return phy_init_hw(ndev->phydev);
}
static const struct mii_phy_ops emac_dt_mdio_phy_ops = {
@@ -2593,6 +2549,7 @@ static const struct mii_phy_ops emac_dt_mdio_phy_ops = {
static int emac_dt_mdio_probe(struct emac_instance *dev)
{
struct device_node *mii_np;
+ struct mii_bus *bus;
int res;
mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
@@ -2606,23 +2563,23 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
goto put_node;
}
- dev->mii_bus = devm_mdiobus_alloc(&dev->ofdev->dev);
- if (!dev->mii_bus) {
+ bus = devm_mdiobus_alloc(&dev->ofdev->dev);
+ if (!bus) {
res = -ENOMEM;
goto put_node;
}
- dev->mii_bus->priv = dev->ndev;
- dev->mii_bus->parent = dev->ndev->dev.parent;
- dev->mii_bus->name = "emac_mdio";
- dev->mii_bus->read = &emac_mii_bus_read;
- dev->mii_bus->write = &emac_mii_bus_write;
- dev->mii_bus->reset = &emac_mii_bus_reset;
- snprintf(dev->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name);
- res = of_mdiobus_register(dev->mii_bus, mii_np);
+ bus->priv = dev->ndev;
+ bus->parent = dev->ndev->dev.parent;
+ bus->name = "emac_mdio";
+ bus->read = &emac_mii_bus_read;
+ bus->write = &emac_mii_bus_write;
+ bus->reset = &emac_mii_bus_reset;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name);
+ res = devm_of_mdiobus_register(&dev->ofdev->dev, bus, mii_np);
if (res) {
dev_err(&dev->ofdev->dev, "cannot register MDIO bus %s (%d)",
- dev->mii_bus->name, res);
+ bus->name, res);
}
put_node:
@@ -2633,26 +2590,28 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
static int emac_dt_phy_connect(struct emac_instance *dev,
struct device_node *phy_handle)
{
+ struct phy_device *phy_dev;
+
dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
GFP_KERNEL);
if (!dev->phy.def)
return -ENOMEM;
- dev->phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link,
- 0, dev->phy_mode);
- if (!dev->phy_dev) {
+ phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link, 0,
+ dev->phy_mode);
+ if (!phy_dev) {
dev_err(&dev->ofdev->dev, "failed to connect to PHY.\n");
return -ENODEV;
}
- dev->phy.def->phy_id = dev->phy_dev->drv->phy_id;
- dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
- dev->phy.def->name = dev->phy_dev->drv->name;
+ dev->phy.def->phy_id = phy_dev->drv->phy_id;
+ dev->phy.def->phy_id_mask = phy_dev->drv->phy_id_mask;
+ dev->phy.def->name = phy_dev->drv->name;
dev->phy.def->ops = &emac_dt_mdio_phy_ops;
ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features,
- dev->phy_dev->supported);
- dev->phy.address = dev->phy_dev->mdio.addr;
- dev->phy.mode = dev->phy_dev->interface;
+ phy_dev->supported);
+ dev->phy.address = phy_dev->mdio.addr;
+ dev->phy.mode = phy_dev->interface;
return 0;
}
@@ -2668,8 +2627,6 @@ static int emac_dt_phy_probe(struct emac_instance *dev)
res = emac_dt_mdio_probe(dev);
if (!res) {
res = emac_dt_phy_connect(dev, phy_handle);
- if (res)
- mdiobus_unregister(dev->mii_bus);
}
}
@@ -2708,13 +2665,11 @@ static int emac_init_phy(struct emac_instance *dev)
return res;
res = of_phy_register_fixed_link(np);
- dev->phy_dev = of_phy_find_device(np);
- if (res || !dev->phy_dev) {
- mdiobus_unregister(dev->mii_bus);
+ ndev->phydev = of_phy_find_device(np);
+ if (res || !ndev->phydev)
return res ? res : -EINVAL;
- }
emac_adjust_link(dev->ndev);
- put_device(&dev->phy_dev->mdio.dev);
+ put_device(&ndev->phydev->mdio.dev);
}
return 0;
}
@@ -3053,7 +3008,7 @@ static int emac_probe(struct platform_device *ofdev)
/* Allocate our net_device structure */
err = -ENOMEM;
- ndev = alloc_etherdev(sizeof(struct emac_instance));
+ ndev = devm_alloc_etherdev(&ofdev->dev, sizeof(struct emac_instance));
if (!ndev)
goto err_gone;
@@ -3072,35 +3027,40 @@ static int emac_probe(struct platform_device *ofdev)
/* Init various config data based on device-tree */
err = emac_init_config(dev);
if (err)
- goto err_free;
+ goto err_gone;
- /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
+ /* Get interrupts. EMAC irq is mandatory */
dev->emac_irq = irq_of_parse_and_map(np, 0);
- dev->wol_irq = irq_of_parse_and_map(np, 1);
if (!dev->emac_irq) {
printk(KERN_ERR "%pOF: Can't map main interrupt\n", np);
err = -ENODEV;
- goto err_free;
+ goto err_gone;
+ }
+
+ /* Setup error IRQ handler */
+ err = devm_request_irq(&ofdev->dev, dev->emac_irq, emac_irq, 0, "EMAC",
+ dev);
+ if (err) {
+ dev_err_probe(&ofdev->dev, err, "failed to request IRQ %d",
+ dev->emac_irq);
+ goto err_gone;
}
+
ndev->irq = dev->emac_irq;
/* Map EMAC regs */
// TODO : platform_get_resource() and devm_ioremap_resource()
- dev->emacp = of_iomap(np, 0);
- if (dev->emacp == NULL) {
- printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
+ dev->emacp = devm_of_iomap(&ofdev->dev, np, 0, NULL);
+ if (!dev->emacp) {
+ dev_err(&ofdev->dev, "can't map device registers");
err = -ENOMEM;
- goto err_irq_unmap;
+ goto err_gone;
}
/* Wait for dependent devices */
err = emac_wait_deps(dev);
- if (err) {
- printk(KERN_ERR
- "%pOF: Timeout waiting for dependent devices\n", np);
- /* display more info about what's missing ? */
- goto err_reg_unmap;
- }
+ if (err)
+ goto err_gone;
dev->mal = platform_get_drvdata(dev->mal_dev);
if (dev->mdio_dev != NULL)
dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
@@ -3187,7 +3147,7 @@ static int emac_probe(struct platform_device *ofdev)
netif_carrier_off(ndev);
- err = register_netdev(ndev);
+ err = devm_register_netdev(&ofdev->dev, ndev);
if (err) {
printk(KERN_ERR "%pOF: failed to register net device (%d)!\n",
np, err);
@@ -3200,10 +3160,6 @@ static int emac_probe(struct platform_device *ofdev)
wmb();
platform_set_drvdata(ofdev, dev);
- /* There's a new kid in town ! Let's tell everybody */
- wake_up_all(&emac_probe_wait);
-
-
printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
ndev->name, dev->cell_index, np, ndev->dev_addr);
@@ -3232,24 +3188,9 @@ static int emac_probe(struct platform_device *ofdev)
mal_unregister_commac(dev->mal, &dev->commac);
err_rel_deps:
emac_put_deps(dev);
- err_reg_unmap:
- iounmap(dev->emacp);
- err_irq_unmap:
- if (dev->wol_irq)
- irq_dispose_mapping(dev->wol_irq);
- if (dev->emac_irq)
- irq_dispose_mapping(dev->emac_irq);
- err_free:
- free_netdev(ndev);
err_gone:
- /* if we were on the bootlist, remove us as we won't show up and
- * wake up all waiters to notify them in case they were waiting
- * on us
- */
- if (blist) {
+ if (blist)
*blist = NULL;
- wake_up_all(&emac_probe_wait);
- }
return err;
}
@@ -3259,8 +3200,6 @@ static void emac_remove(struct platform_device *ofdev)
DBG(dev, "remove" NL);
- unregister_netdev(dev->ndev);
-
cancel_work_sync(&dev->reset_work);
if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
@@ -3270,26 +3209,11 @@ static void emac_remove(struct platform_device *ofdev)
if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
zmii_detach(dev->zmii_dev, dev->zmii_port);
- if (dev->phy_dev)
- phy_disconnect(dev->phy_dev);
-
- if (dev->mii_bus)
- mdiobus_unregister(dev->mii_bus);
-
busy_phy_map &= ~(1 << dev->phy.address);
DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
mal_unregister_commac(dev->mal, &dev->commac);
emac_put_deps(dev);
-
- iounmap(dev->emacp);
-
- if (dev->wol_irq)
- irq_dispose_mapping(dev->wol_irq);
- if (dev->emac_irq)
- irq_dispose_mapping(dev->emac_irq);
-
- free_netdev(dev->ndev);
}
/* XXX Features in here should be replaced by properties... */
@@ -3328,16 +3252,15 @@ static void __init emac_make_bootlist(void)
/* Collect EMACs */
while((np = of_find_all_nodes(np)) != NULL) {
- const u32 *idx;
+ u32 idx;
if (of_match_node(emac_match, np) == NULL)
continue;
if (of_property_read_bool(np, "unused"))
continue;
- idx = of_get_property(np, "cell-index", NULL);
- if (idx == NULL)
+ if (of_property_read_u32(np, "cell-index", &idx))
continue;
- cell_indices[i] = *idx;
+ cell_indices[i] = idx;
emac_boot_list[i++] = of_node_get(np);
if (i >= EMAC_BOOT_LIST_SIZE) {
of_node_put(np);
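[Note: both of_get_property() call sites rewritten above (emac_read_uint_prop() and emac_make_bootlist()) move onto of_property_read_u32(), which folds the existence check, length check and dereference into one call with a typed out-parameter:

u32 idx;
int err;

err = of_property_read_u32(np, "cell-index", &idx);
if (err)	/* -EINVAL, -ENODATA or -EOVERFLOW from the OF core */
	return err;]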
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 295516b07662..89fa1683ec3c 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -188,10 +188,6 @@ struct emac_instance {
struct emac_instance *mdio_instance;
struct mutex mdio_lock;
- /* Device-tree based phy configuration */
- struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
-
/* ZMII infos if any */
u32 zmii_ph;
u32 zmii_port;
@@ -400,7 +396,7 @@ static inline int emac_has_feature(struct emac_instance *dev,
((u32)(1 << (EMAC_XAHT_WIDTH(dev) - 1)) >> \
((slot) & (u32)(EMAC_XAHT_WIDTH(dev) - 1)))
-static inline u32 *emac_xaht_base(struct emac_instance *dev)
+static inline u32 __iomem *emac_xaht_base(struct emac_instance *dev)
{
struct emac_regs __iomem *p = dev->emacp;
int offset;
@@ -413,10 +409,10 @@ static inline u32 *emac_xaht_base(struct emac_instance *dev)
else
offset = offsetof(struct emac_regs, u0.emac4.iaht1);
- return (u32 *)((ptrdiff_t)p + offset);
+ return (u32 __iomem *)((__force ptrdiff_t)p + offset);
}
-static inline u32 *emac_gaht_base(struct emac_instance *dev)
+static inline u32 __iomem *emac_gaht_base(struct emac_instance *dev)
{
/* GAHT registers always come after an identical number of
* IAHT registers.
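[Note: the __iomem annotations added in core.h above let sparse enforce that the hash-table base is only touched through MMIO accessors, which matches the emac_hash_mc() fix earlier in this series. Illustration, using in_be32/out_be32 since those are the powerpc accessors this driver already uses:

u32 __iomem *gaht = emac_gaht_base(dev);
u32 v = in_be32(gaht);		/* allowed: MMIO read  */

out_be32(gaht, v | 1);		/* allowed: MMIO write */
/* *gaht = 0; */		/* plain dereference: sparse would warn */]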
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index d92dd9c83031..99d5f83f7c60 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -578,7 +578,7 @@ static int mal_probe(struct platform_device *ofdev)
printk(KERN_ERR "%pOF: Support for 405EZ not enabled!\n",
ofdev->dev.of_node);
err = -ENODEV;
- goto fail;
+ goto fail_unmap;
#endif
}
@@ -742,6 +742,8 @@ static void mal_remove(struct platform_device *ofdev)
free_netdev(mal->dummy_dev);
+ dcr_unmap(mal->dcr_host, 0x100);
+
dma_free_coherent(&ofdev->dev,
sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 4c9d9badd698..b619a3ec245b 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -39,7 +39,8 @@
#include "ibmveth.h"
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
+ bool reuse);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
static struct kobj_type ktype_veth_pool;
@@ -226,6 +227,16 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
for (i = 0; i < count; ++i) {
union ibmveth_buf_desc desc;
+ free_index = pool->consumer_index;
+ index = pool->free_map[free_index];
+ skb = NULL;
+
+ BUG_ON(index == IBM_VETH_INVALID_MAP);
+
+ /* are we allocating a new buffer or recycling an old one? */
+ if (pool->skbuff[index])
+ goto reuse;
+
skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
if (!skb) {
@@ -235,46 +246,46 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
break;
}
- free_index = pool->consumer_index;
- pool->consumer_index++;
- if (pool->consumer_index >= pool->size)
- pool->consumer_index = 0;
- index = pool->free_map[free_index];
-
- BUG_ON(index == IBM_VETH_INVALID_MAP);
- BUG_ON(pool->skbuff[index] != NULL);
-
dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
pool->buff_size, DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
goto failure;
- pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
pool->dma_addr[index] = dma_addr;
pool->skbuff[index] = skb;
- correlator = ((u64)pool->index << 32) | index;
- *(u64 *)skb->data = correlator;
-
- desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
- desc.fields.address = dma_addr;
-
if (rx_flush) {
unsigned int len = min(pool->buff_size,
- adapter->netdev->mtu +
- IBMVETH_BUFF_OH);
+ adapter->netdev->mtu +
+ IBMVETH_BUFF_OH);
ibmveth_flush_buffer(skb->data, len);
}
+reuse:
+ dma_addr = pool->dma_addr[index];
+ desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
+ desc.fields.address = dma_addr;
+
+ correlator = ((u64)pool->index << 32) | index;
+ *(u64 *)pool->skbuff[index]->data = correlator;
+
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
desc.desc);
if (lpar_rc != H_SUCCESS) {
+ netdev_warn(adapter->netdev,
+ "%sadd_logical_lan failed %lu\n",
+ skb ? "" : "When recycling: ", lpar_rc);
goto failure;
- } else {
- buffers_added++;
- adapter->replenish_add_buff_success++;
}
+
+ pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
+ pool->consumer_index++;
+ if (pool->consumer_index >= pool->size)
+ pool->consumer_index = 0;
+
+ buffers_added++;
+ adapter->replenish_add_buff_success++;
}
mb();
@@ -282,17 +293,13 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
return;
failure:
- pool->free_map[free_index] = index;
- pool->skbuff[index] = NULL;
- if (pool->consumer_index == 0)
- pool->consumer_index = pool->size - 1;
- else
- pool->consumer_index--;
- if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
+
+ if (dma_addr && !dma_mapping_error(&adapter->vdev->dev, dma_addr))
dma_unmap_single(&adapter->vdev->dev,
pool->dma_addr[index], pool->buff_size,
DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(pool->skbuff[index]);
+ pool->skbuff[index] = NULL;
adapter->replenish_add_buff_failure++;
mb();
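
The replenish path above stamps each buffer with a 64-bit correlator: the pool number in the high 32 bits and the buffer index in the low 32, so the completion side can find the skb again. A standalone illustration of the packing (plain userspace C, no driver types):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t pool = 2, index = 513;

            /* pack: pool in the high word, buffer index in the low word */
            uint64_t correlator = (pool << 32) | index;

            /* unpack, exactly as ibmveth_remove_buffer_from_pool() does */
            printf("pool=%llu index=%llu\n",
                   (unsigned long long)(correlator >> 32),
                   (unsigned long long)(correlator & 0xffffffffULL));
            return 0;
    }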
@@ -365,7 +372,7 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
- u64 correlator)
+ u64 correlator, bool reuse)
{
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
@@ -376,15 +383,23 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
BUG_ON(index >= adapter->rx_buff_pool[pool].size);
skb = adapter->rx_buff_pool[pool].skbuff[index];
-
BUG_ON(skb == NULL);
- adapter->rx_buff_pool[pool].skbuff[index] = NULL;
+ /* if we are going to reuse the buffer then keep the pointers around,
+ * but mark the index as available. Replenish will see the skb pointer
+ * and assume it is to be recycled.
+ */
+ if (!reuse) {
+ /* remove the skb pointer to mark it free. Actual freeing is done
+ * by the upper networking layers after gro_receive
+ */
+ adapter->rx_buff_pool[pool].skbuff[index] = NULL;
- dma_unmap_single(&adapter->vdev->dev,
- adapter->rx_buff_pool[pool].dma_addr[index],
- adapter->rx_buff_pool[pool].buff_size,
- DMA_FROM_DEVICE);
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->rx_buff_pool[pool].dma_addr[index],
+ adapter->rx_buff_pool[pool].buff_size,
+ DMA_FROM_DEVICE);
+ }
free_index = adapter->rx_buff_pool[pool].producer_index;
adapter->rx_buff_pool[pool].producer_index++;
@@ -411,51 +426,13 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
return adapter->rx_buff_pool[pool].skbuff[index];
}
-/* recycle the current buffer on the rx queue */
-static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
+static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
+ bool reuse)
{
- u32 q_index = adapter->rx_queue.index;
- u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
- unsigned int pool = correlator >> 32;
- unsigned int index = correlator & 0xffffffffUL;
- union ibmveth_buf_desc desc;
- unsigned long lpar_rc;
- int ret = 1;
-
- BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
- BUG_ON(index >= adapter->rx_buff_pool[pool].size);
-
- if (!adapter->rx_buff_pool[pool].active) {
- ibmveth_rxq_harvest_buffer(adapter);
- ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
- goto out;
- }
-
- desc.fields.flags_len = IBMVETH_BUF_VALID |
- adapter->rx_buff_pool[pool].buff_size;
- desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
-
- lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
-
- if (lpar_rc != H_SUCCESS) {
- netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
- "during recycle rc=%ld", lpar_rc);
- ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
- ret = 0;
- }
-
- if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
- adapter->rx_queue.index = 0;
- adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
- }
-
-out:
- return ret;
-}
+ u64 cor;
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
-{
- ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
+ cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
+ ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
adapter->rx_queue.index = 0;
@@ -1337,6 +1314,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
unsigned long lpar_rc;
u16 mss = 0;
+restart_poll:
while (frames_processed < budget) {
if (!ibmveth_rxq_pending_buffer(adapter))
break;
@@ -1346,7 +1324,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
wmb(); /* suggested by larson1 */
adapter->rx_invalid_buffer++;
netdev_dbg(netdev, "recycling invalid buffer\n");
- ibmveth_rxq_recycle_buffer(adapter);
+ ibmveth_rxq_harvest_buffer(adapter, true);
} else {
struct sk_buff *skb, *new_skb;
int length = ibmveth_rxq_frame_length(adapter);
@@ -1379,11 +1357,10 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
if (rx_flush)
ibmveth_flush_buffer(skb->data,
length + offset);
- if (!ibmveth_rxq_recycle_buffer(adapter))
- kfree_skb(skb);
+ ibmveth_rxq_harvest_buffer(adapter, true);
skb = new_skb;
} else {
- ibmveth_rxq_harvest_buffer(adapter);
+ ibmveth_rxq_harvest_buffer(adapter, false);
skb_reserve(skb, offset);
}
@@ -1420,24 +1397,25 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
ibmveth_replenish_task(adapter);
- if (frames_processed < budget) {
- napi_complete_done(napi, frames_processed);
+ if (frames_processed == budget)
+ goto out;
- /* We think we are done - reenable interrupts,
- * then check once more to make sure we are done.
- */
- lpar_rc = h_vio_signal(adapter->vdev->unit_address,
- VIO_IRQ_ENABLE);
+ if (!napi_complete_done(napi, frames_processed))
+ goto out;
- BUG_ON(lpar_rc != H_SUCCESS);
+ /* We think we are done - reenable interrupts,
+ * then check once more to make sure we are done.
+ */
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
+ BUG_ON(lpar_rc != H_SUCCESS);
- if (ibmveth_rxq_pending_buffer(adapter) &&
- napi_schedule(napi)) {
- lpar_rc = h_vio_signal(adapter->vdev->unit_address,
- VIO_IRQ_DISABLE);
- }
+ if (ibmveth_rxq_pending_buffer(adapter) && napi_schedule(napi)) {
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address,
+ VIO_IRQ_DISABLE);
+ goto restart_poll;
}
+out:
return frames_processed;
}
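
The reworked poll routine above follows the standard NAPI completion dance: finish only if under budget, let napi_complete_done() veto the completion, re-enable the interrupt, then re-check for work that raced in and, if any, disable the interrupt again and jump back to the top. A schematic of the pattern, with hypothetical example_* helpers standing in for the ibmveth ones:

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int done = 0;

    restart_poll:
            done += example_process_rx(napi, budget - done);
            if (done == budget)
                    return done;    /* stay scheduled; NAPI calls us again */

            if (!napi_complete_done(napi, done))
                    return done;    /* someone already rescheduled us */

            example_enable_irq(napi);

            /* close the race: work that arrived before the irq was armed */
            if (example_rx_pending(napi) && napi_schedule(napi)) {
                    example_disable_irq(napi);
                    goto restart_poll;
            }

            return done;
    }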
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 23ebeb143987..97425c06e1ed 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -117,6 +117,7 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb);
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
static void flush_reset_queue(struct ibmvnic_adapter *adapter);
+static void print_subcrq_error(struct device *dev, int rc, const char *func);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -2140,63 +2141,49 @@ static int ibmvnic_close(struct net_device *netdev)
}
/**
- * build_hdr_data - creates L2/L3/L4 header data buffer
+ * get_hdr_lens - fills list of L2/L3/L4 hdr lens
* @hdr_field: bitfield determining needed headers
* @skb: socket buffer
- * @hdr_len: array of header lengths
- * @hdr_data: buffer to write the header to
+ * @hdr_len: array of header lengths to be filled
*
* Reads hdr_field to determine which headers are needed by firmware.
* Fills hdr_len with the length of each required header and returns
* their combined length, which is later used to build the descriptors.
+ *
+ * Return: total len of all headers
*/
-static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
- int *hdr_len, u8 *hdr_data)
+static int get_hdr_lens(u8 hdr_field, struct sk_buff *skb,
+ int *hdr_len)
{
int len = 0;
- u8 *hdr;
- if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
- hdr_len[0] = sizeof(struct vlan_ethhdr);
- else
- hdr_len[0] = sizeof(struct ethhdr);
+
+ if ((hdr_field >> 6) & 1) {
+ hdr_len[0] = skb_mac_header_len(skb);
+ len += hdr_len[0];
+ }
+
+ if ((hdr_field >> 5) & 1) {
+ hdr_len[1] = skb_network_header_len(skb);
+ len += hdr_len[1];
+ }
+
+ if (!((hdr_field >> 4) & 1))
+ return len;
if (skb->protocol == htons(ETH_P_IP)) {
- hdr_len[1] = ip_hdr(skb)->ihl * 4;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
hdr_len[2] = tcp_hdrlen(skb);
else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
hdr_len[2] = sizeof(struct udphdr);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
- hdr_len[1] = sizeof(struct ipv6hdr);
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
hdr_len[2] = tcp_hdrlen(skb);
else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
hdr_len[2] = sizeof(struct udphdr);
- } else if (skb->protocol == htons(ETH_P_ARP)) {
- hdr_len[1] = arp_hdr_len(skb->dev);
- hdr_len[2] = 0;
}
- memset(hdr_data, 0, 120);
- if ((hdr_field >> 6) & 1) {
- hdr = skb_mac_header(skb);
- memcpy(hdr_data, hdr, hdr_len[0]);
- len += hdr_len[0];
- }
-
- if ((hdr_field >> 5) & 1) {
- hdr = skb_network_header(skb);
- memcpy(hdr_data + len, hdr, hdr_len[1]);
- len += hdr_len[1];
- }
-
- if ((hdr_field >> 4) & 1) {
- hdr = skb_transport_header(skb);
- memcpy(hdr_data + len, hdr, hdr_len[2]);
- len += hdr_len[2];
- }
- return len;
+ return len + hdr_len[2];
}
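
get_hdr_lens() above keys off three bits of hdr_field: bit 6 requests the L2 header length, bit 5 the L3 length, and bit 4 the L4 length. A standalone decoding of that layout (plain C; the bit positions are taken from the tests above):

    #include <stdio.h>

    int main(void)
    {
            unsigned char hdr_field = 0x70; /* bits 6, 5, 4: L2+L3+L4 */

            printf("L2 %s, L3 %s, L4 %s\n",
                   (hdr_field >> 6) & 1 ? "yes" : "no",
                   (hdr_field >> 5) & 1 ? "yes" : "no",
                   (hdr_field >> 4) & 1 ? "yes" : "no");
            return 0;
    }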
/**
@@ -2209,12 +2196,14 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
*
* Creates header and, if needed, header extension descriptors and
* places them in a descriptor array, scrq_arr
+ *
+ * Return: Number of header descs
*/
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
union sub_crq *scrq_arr)
{
- union sub_crq hdr_desc;
+ union sub_crq *hdr_desc;
int tmp_len = len;
int num_descs = 0;
u8 *data, *cur;
@@ -2223,28 +2212,26 @@ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
while (tmp_len > 0) {
cur = hdr_data + len - tmp_len;
- memset(&hdr_desc, 0, sizeof(hdr_desc));
- if (cur != hdr_data) {
- data = hdr_desc.hdr_ext.data;
+ hdr_desc = &scrq_arr[num_descs];
+ if (num_descs) {
+ data = hdr_desc->hdr_ext.data;
tmp = tmp_len > 29 ? 29 : tmp_len;
- hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
- hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
- hdr_desc.hdr_ext.len = tmp;
+ hdr_desc->hdr_ext.first = IBMVNIC_CRQ_CMD;
+ hdr_desc->hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
+ hdr_desc->hdr_ext.len = tmp;
} else {
- data = hdr_desc.hdr.data;
+ data = hdr_desc->hdr.data;
tmp = tmp_len > 24 ? 24 : tmp_len;
- hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
- hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
- hdr_desc.hdr.len = tmp;
- hdr_desc.hdr.l2_len = (u8)hdr_len[0];
- hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
- hdr_desc.hdr.l4_len = (u8)hdr_len[2];
- hdr_desc.hdr.flag = hdr_field << 1;
+ hdr_desc->hdr.first = IBMVNIC_CRQ_CMD;
+ hdr_desc->hdr.type = IBMVNIC_HDR_DESC;
+ hdr_desc->hdr.len = tmp;
+ hdr_desc->hdr.l2_len = (u8)hdr_len[0];
+ hdr_desc->hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
+ hdr_desc->hdr.l4_len = (u8)hdr_len[2];
+ hdr_desc->hdr.flag = hdr_field << 1;
}
memcpy(data, cur, tmp);
tmp_len -= tmp;
- *scrq_arr = hdr_desc;
- scrq_arr++;
num_descs++;
}
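
The rewritten loop above fills descriptors asymmetrically: the first (main) descriptor carries up to 24 bytes of header data plus the per-layer lengths, and each extension descriptor after it carries up to 29 bytes. For example, a 54-byte Ethernet+IPv4+TCP header block needs three descriptors (24 + 29 + 1). A standalone check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
            int len = 54;           /* 14 L2 + 20 L3 + 20 L4 */
            int num_descs = 1;      /* main descriptor: up to 24 bytes */

            if (len > 24)
                    num_descs += (len - 24 + 28) / 29;  /* 29-byte exts */

            printf("%d header bytes -> %d descriptors\n", len, num_descs);
            return 0;
    }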
@@ -2267,13 +2254,11 @@ static void build_hdr_descs_arr(struct sk_buff *skb,
int *num_entries, u8 hdr_field)
{
int hdr_len[3] = {0, 0, 0};
- u8 hdr_data[140] = {0};
int tot_len;
- tot_len = build_hdr_data(hdr_field, skb, hdr_len,
- hdr_data);
- *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
- indir_arr + 1);
+ tot_len = get_hdr_lens(hdr_field, skb, hdr_len);
+ *num_entries += create_hdr_descs(hdr_field, skb_mac_header(skb),
+ tot_len, hdr_len, indir_arr + 1);
}
static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
@@ -2350,8 +2335,29 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
}
}
+static int send_subcrq_direct(struct ibmvnic_adapter *adapter,
+ u64 remote_handle, u64 *entry)
+{
+ unsigned int ua = adapter->vdev->unit_address;
+ struct device *dev = &adapter->vdev->dev;
+ int rc;
+
+ /* Make sure the hypervisor sees the complete request */
+ dma_wmb();
+ rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
+ cpu_to_be64(remote_handle),
+ cpu_to_be64(entry[0]), cpu_to_be64(entry[1]),
+ cpu_to_be64(entry[2]), cpu_to_be64(entry[3]));
+
+ if (rc)
+ print_subcrq_error(dev, rc, __func__);
+
+ return rc;
+}
+
static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
- struct ibmvnic_sub_crq_queue *tx_scrq)
+ struct ibmvnic_sub_crq_queue *tx_scrq,
+ bool indirect)
{
struct ibmvnic_ind_xmit_queue *ind_bufp;
u64 dma_addr;
@@ -2366,7 +2372,13 @@ static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
if (!entries)
return 0;
- rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
+
+ if (indirect)
+ rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
+ else
+ rc = send_subcrq_direct(adapter, handle,
+ (u64 *)ind_bufp->indir_arr);
+
if (rc)
ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
else
@@ -2397,6 +2409,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned long lpar_rc;
union sub_crq tx_crq;
unsigned int offset;
+ bool use_scrq_send_direct = false;
int num_entries = 1;
unsigned char *dst;
int bufidx = 0;
@@ -2424,7 +2437,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_dropped++;
tx_send_failed++;
ret = NETDEV_TX_OK;
- lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_err;
goto out;
@@ -2442,7 +2455,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_send_failed++;
tx_dropped++;
ret = NETDEV_TX_OK;
- lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_err;
goto out;
@@ -2456,6 +2469,20 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
memset(dst, 0, tx_pool->buf_size);
data_dma_addr = ltb->addr + offset;
+ /* If we are going to send this via send_subcrq_direct, we need to
+ * resolve the checksum before copying the data into the ltb. These
+ * packets force-disable checksum offload so that we can guarantee
+ * the FW does not need header info and the send can go direct. The
+ * vnic server must also be able to xmit standard packets without
+ * header data.
+ */
+ if (*hdrs == 0 && !skb_is_gso(skb) &&
+ !ind_bufp->index && !netdev_xmit_more()) {
+ use_scrq_send_direct = true;
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_help(skb))
+ use_scrq_send_direct = false;
+ }
+
if (skb_shinfo(skb)->nr_frags) {
int cur, i;
@@ -2475,9 +2502,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
skb_copy_from_linear_data(skb, dst, skb->len);
}
- /* post changes to long_term_buff *dst before VIOS accessing it */
- dma_wmb();
-
tx_pool->consumer_index =
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
@@ -2540,6 +2564,18 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
hdrs += 2;
+ } else if (use_scrq_send_direct) {
+ /* See above comment, CSO disabled with direct xmit */
+ tx_crq.v1.flags1 &= ~(IBMVNIC_TX_CHKSUM_OFFLOAD);
+ ind_bufp->index = 1;
+ tx_buff->num_entries = 1;
+ netdev_tx_sent_queue(txq, skb->len);
+ ind_bufp->indir_arr[0] = tx_crq;
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, false);
+ if (lpar_rc != H_SUCCESS)
+ goto tx_err;
+
+ goto early_exit;
}
if ((*hdrs >> 7) & 1)
@@ -2549,7 +2585,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_buff->num_entries = num_entries;
/* flush buffer if current entry can not fit */
if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
- lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_flush_err;
}
@@ -2557,15 +2593,17 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
indir_arr[0] = tx_crq;
memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
num_entries * sizeof(struct ibmvnic_generic_scrq));
+
ind_bufp->index += num_entries;
if (__netdev_tx_sent_queue(txq, skb->len,
netdev_xmit_more() &&
ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
- lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_err;
}
+early_exit:
if (atomic_add_return(num_entries, &tx_scrq->used)
>= adapter->req_tx_entries_per_subcrq) {
netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
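
Collecting the conditions scattered through ibmvnic_xmit() above: the direct H_SEND_SUB_CRQ path is only taken when the packet needs no header descriptors (*hdrs == 0), is not GSO, the indirect buffer is empty, and no further xmits are queued; even then, a CHECKSUM_PARTIAL skb must be resolved in software first because direct sends forgo checksum offload. A hedged restatement of the gate as one predicate (example_can_send_direct is illustrative, not the driver's function):

    static bool example_can_send_direct(struct sk_buff *skb, u8 hdrs,
                                        int pending_descs)
    {
            /* header descriptors or GSO require the indirect path */
            if (hdrs || skb_is_gso(skb))
                    return false;

            /* a batch is building up; keep using the indirect buffer */
            if (pending_descs || netdev_xmit_more())
                    return false;

            /* direct sends forgo checksum offload, so resolve it now */
            if (skb->ip_summed == CHECKSUM_PARTIAL &&
                skb_checksum_help(skb))
                    return false;

            return true;
    }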
@@ -3527,9 +3565,8 @@ restart_poll:
}
if (adapter->state != VNIC_CLOSING &&
- ((atomic_read(&adapter->rx_pool[scrq_num].available) <
- adapter->req_rx_add_entries_per_subcrq / 2) ||
- frames_processed < budget))
+ (atomic_read(&adapter->rx_pool[scrq_num].available) <
+ adapter->req_rx_add_entries_per_subcrq / 2))
replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
if (frames_processed < budget) {
if (napi_complete_done(napi, frames_processed)) {
@@ -4169,20 +4206,17 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
struct device *dev = &adapter->vdev->dev;
+ int num_packets = 0, total_bytes = 0;
struct ibmvnic_tx_pool *tx_pool;
struct ibmvnic_tx_buff *txbuff;
struct netdev_queue *txq;
union sub_crq *next;
- int index;
- int i;
+ int index, i;
restart_loop:
while (pending_scrq(adapter, scrq)) {
unsigned int pool = scrq->pool_index;
int num_entries = 0;
- int total_bytes = 0;
- int num_packets = 0;
-
next = ibmvnic_next_scrq(adapter, scrq);
for (i = 0; i < next->tx_comp.num_comps; i++) {
index = be32_to_cpu(next->tx_comp.correlators[i]);
@@ -4218,8 +4252,6 @@ restart_loop:
/* remove tx_comp scrq*/
next->tx_comp.first = 0;
- txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
- netdev_tx_completed_queue(txq, num_packets, total_bytes);
if (atomic_sub_return(num_entries, &scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
@@ -4244,6 +4276,9 @@ restart_loop:
goto restart_loop;
}
+ txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
+ netdev_tx_completed_queue(txq, num_packets, total_bytes);
+
return 0;
}
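
Moving netdev_tx_completed_queue() below the restart_loop above means byte-queue-limits sees one completion report per cleaning pass instead of one per tx_comp CRQ, paired against the netdev_tx_sent_queue()/__netdev_tx_sent_queue() calls on the xmit side. The BQL contract, schematically (the example_* wrappers are illustrative):

    /* xmit side: account every packet as it is queued to hardware */
    static void example_xmit_account(struct netdev_queue *txq,
                                     struct sk_buff *skb)
    {
            netdev_tx_sent_queue(txq, skb->len);
    }

    /* completion side: report the whole batch once, after the loop */
    static void example_complete_batch(struct netdev_queue *txq,
                                       unsigned int pkts,
                                       unsigned int bytes)
    {
            /* BQL resizes the in-flight byte budget from these totals */
            netdev_tx_completed_queue(txq, pkts, bytes);
    }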
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index aa139b67a55b..3a5bbda235cb 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -146,7 +146,7 @@
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/rtnetlink.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#define DRV_NAME "e100"
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 4b6e7536170a..fc8ed38aa095 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -108,8 +108,8 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_RPL_I219_V22 0x0DC8
#define E1000_DEV_ID_PCH_MTP_I219_LM18 0x550A
#define E1000_DEV_ID_PCH_MTP_I219_V18 0x550B
-#define E1000_DEV_ID_PCH_MTP_I219_LM19 0x550C
-#define E1000_DEV_ID_PCH_MTP_I219_V19 0x550D
+#define E1000_DEV_ID_PCH_ADP_I219_LM19 0x550C
+#define E1000_DEV_ID_PCH_ADP_I219_V19 0x550D
#define E1000_DEV_ID_PCH_LNP_I219_LM20 0x550E
#define E1000_DEV_ID_PCH_LNP_I219_V20 0x550F
#define E1000_DEV_ID_PCH_LNP_I219_LM21 0x5510
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 360ee26557f7..07e903346358 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6671,8 +6671,10 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
/* enable wakeup by the PHY */
retval = e1000_init_phy_wakeup(adapter, wufc);
- if (retval)
- return retval;
+ if (retval) {
+ e_err("Failed to enable wakeup\n");
+ goto skip_phy_configurations;
+ }
} else {
/* enable wakeup by the MAC */
ew32(WUFC, wufc);
@@ -6693,8 +6695,10 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
* or broadcast.
*/
retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
- if (retval)
- return retval;
+ if (retval) {
+ e_err("Failed to enable ULP\n");
+ goto skip_phy_configurations;
+ }
}
}
@@ -6726,6 +6730,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
hw->phy.ops.release(hw);
}
+skip_phy_configurations:
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
@@ -6968,15 +6973,13 @@ static int e1000e_pm_suspend(struct device *dev)
e1000e_pm_freeze(dev);
rc = __e1000_shutdown(pdev, false);
- if (rc) {
- e1000e_pm_thaw(dev);
- } else {
+ if (!rc) {
/* Introduce S0ix implementation */
if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
e1000e_s0ix_entry_flow(adapter);
}
- return rc;
+ return 0;
}
static int e1000e_pm_resume(struct device *dev)
@@ -7896,10 +7899,10 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM19), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V19), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_mtp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_mtp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_mtp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_mtp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_mtp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_mtp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_mtp },
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d546567e0286..2089a0e172bf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -4,6 +4,7 @@
#ifndef _I40E_H_
#define _I40E_H_
+#include <linux/linkmode.h>
#include <linux/pci.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/types.h>
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 1d0d2e526adb..f2506511bbff 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2555,16 +2555,12 @@ static int i40e_get_ts_info(struct net_device *dev,
return ethtool_op_get_ts_info(dev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (pf->ptp_clock)
info->phc_index = ptp_clock_index(pf->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
@@ -5641,6 +5637,26 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
return 0;
}
+static void i40e_eee_capability_to_kedata_supported(__le16 eee_capability_,
+ unsigned long *supported)
+{
+ const int eee_capability = le16_to_cpu(eee_capability_);
+ static const int lut[] = {
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ };
+
+ linkmode_zero(supported);
+ for (unsigned int i = ARRAY_SIZE(lut); i--; )
+ if (eee_capability & BIT(i + 1))
+ linkmode_set_bit(lut[i], supported);
+}
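
The lookup table above maps EEE capability bits onto ethtool link modes with an off-by-one: capability bit i + 1 selects lut[i], so bit 0 is never consulted. A standalone illustration of the indexing (plain C, link modes reduced to their names):

    #include <stdio.h>

    int main(void)
    {
            static const char * const lut[] = {
                    "100baseT/Full", "1000baseT/Full", "10000baseT/Full",
                    "1000baseKX/Full", "10000baseKX4/Full",
                    "10000baseKR/Full", "40000baseKR4/Full",
            };
            unsigned int eee_capability = 0x06; /* bits 1 and 2 set */

            for (unsigned int i = 0; i < sizeof(lut) / sizeof(lut[0]); i++)
                    if (eee_capability & (1u << (i + 1)))
                            printf("supported: %s\n", lut[i]);
            return 0;
    }

With 0x06 this prints the 100baseT and 1000baseT modes, matching what the kernel helper would set in the supported mask.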
+
static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -5648,7 +5664,7 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- int status = 0;
+ int status;
/* Get initial PHY capabilities */
status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL);
@@ -5661,11 +5677,18 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
if (phy_cfg.eee_capability == 0)
return -EOPNOTSUPP;
+ i40e_eee_capability_to_kedata_supported(phy_cfg.eee_capability,
+ edata->supported);
+ linkmode_copy(edata->lp_advertised, edata->supported);
+
/* Get current configuration */
status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL);
if (status)
return -EAGAIN;
+ linkmode_zero(edata->advertised);
+ if (phy_cfg.eee_capability)
+ linkmode_copy(edata->advertised, edata->supported);
edata->eee_enabled = !!phy_cfg.eee_capability;
edata->tx_lpi_enabled = pf->stats.tx_lpi_status;
@@ -5681,10 +5704,11 @@ static int i40e_is_eee_param_supported(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_ethtool_not_used {
- u32 value;
+ bool value;
const char *name;
} param[] = {
- {edata->tx_lpi_timer, "tx-timer"},
+ {!!(edata->advertised[0] & ~edata->supported[0]), "advertise"},
+ {!!edata->tx_lpi_timer, "tx-timer"},
{edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"}
};
int i;
@@ -5710,7 +5734,7 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
__le16 eee_capability;
- int status = 0;
+ int status;
/* Deny parameters we don't support */
if (i40e_is_eee_param_supported(netdev, edata))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index cbcfada7b357..25295ae370b2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1734,6 +1734,7 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
struct hlist_node *h;
int bkt;
+ lockdep_assert_held(&vsi->mac_filter_hash_lock);
if (vsi->info.pvid)
return i40e_add_filter(vsi, macaddr,
le16_to_cpu(vsi->info.pvid));
@@ -7264,6 +7265,26 @@ out:
}
#endif /* CONFIG_I40E_DCB */
+static void i40e_print_link_message_eee(struct i40e_vsi *vsi,
+ const char *speed, const char *fc)
+{
+ struct ethtool_keee kedata;
+
+ memzero_explicit(&kedata, sizeof(kedata));
+ if (vsi->netdev->ethtool_ops->get_eee)
+ vsi->netdev->ethtool_ops->get_eee(vsi->netdev, &kedata);
+
+ if (!linkmode_empty(kedata.supported))
+ netdev_info(vsi->netdev,
+ "NIC Link is Up, %sbps Full Duplex, Flow Control: %s, EEE: %s\n",
+ speed, fc,
+ kedata.eee_enabled ? "Enabled" : "Disabled");
+ else
+ netdev_info(vsi->netdev,
+ "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
+ speed, fc);
+}
+
/**
* i40e_print_link_message - print link up or down
* @vsi: the VSI for which link needs a message
@@ -7395,9 +7416,7 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
"NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
speed, req_fec, fec, an, fc);
} else {
- netdev_info(vsi->netdev,
- "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
- speed, fc);
+ i40e_print_link_message_eee(vsi, speed, fc);
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 662622f01e31..dfa785e39458 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2213,8 +2213,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->vsi_res[0].qset_handle
= le16_to_cpu(vsi->info.qs_handle[0]);
if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
eth_zero_addr(vf->default_lan_addr.addr);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
}
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
vf->default_lan_addr.addr);
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 23a6557fc3db..48cd1d06761c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -33,6 +33,7 @@
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_skbedit.h>
#include "iavf_type.h"
#include <linux/avf/virtchnl.h>
@@ -393,6 +394,8 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_VLAN_V2)
#define CRC_OFFLOAD_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_CRC)
+#define TC_U32_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_TC_U32)
#define VLAN_V2_FILTERING_ALLOWED(_a) \
(VLAN_V2_ALLOWED((_a)) && \
((_a)->vlan_v2_caps.filtering.filtering_support.outer || \
@@ -437,6 +440,7 @@ struct iavf_adapter {
#define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */
u16 fdir_active_fltr;
+ u16 raw_fdir_active_fltr;
struct list_head fdir_list_head;
spinlock_t fdir_fltr_lock; /* protect the Flow Director filter list */
@@ -444,6 +448,32 @@ struct iavf_adapter {
spinlock_t adv_rss_lock; /* protect the RSS management list */
};
+/* Must be called with the fdir_fltr_lock held */
+static inline bool iavf_fdir_max_reached(struct iavf_adapter *adapter)
+{
+ return adapter->fdir_active_fltr + adapter->raw_fdir_active_fltr >=
+ IAVF_MAX_FDIR_FILTERS;
+}
+
+static inline void
+iavf_inc_fdir_active_fltr(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr)
+{
+ if (iavf_is_raw_fdir(fltr))
+ adapter->raw_fdir_active_fltr++;
+ else
+ adapter->fdir_active_fltr++;
+}
+
+static inline void
+iavf_dec_fdir_active_fltr(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr)
+{
+ if (iavf_is_raw_fdir(fltr))
+ adapter->raw_fdir_active_fltr--;
+ else
+ adapter->fdir_active_fltr--;
+}
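
The helpers above split the shared 128-filter budget between ethtool and raw (tc u32) filters, while iavf_fdir_max_reached() enforces the combined cap; all three assume fdir_fltr_lock is held. A condensed sketch of the intended call pattern on the add path (example_account_new_filter is illustrative; the real sequencing lives in iavf_fdir_add_fltr() further down):

    static int example_account_new_filter(struct iavf_adapter *adapter,
                                          struct iavf_fdir_fltr *fltr)
    {
            spin_lock_bh(&adapter->fdir_fltr_lock);
            if (iavf_fdir_max_reached(adapter)) {   /* combined cap */
                    spin_unlock_bh(&adapter->fdir_fltr_lock);
                    return -ENOSPC;
            }
            /* bumps raw_fdir_active_fltr or fdir_active_fltr as needed */
            iavf_inc_fdir_active_fltr(adapter, fltr);
            spin_unlock_bh(&adapter->fdir_fltr_lock);
            return 0;
    }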
/* Ethtool Private Flags */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 52273f7eab2c..74a1e9fe1821 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -927,7 +927,7 @@ iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
spin_lock_bh(&adapter->fdir_fltr_lock);
- rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
+ rule = iavf_find_fdir_fltr(adapter, false, fsp->location);
if (!rule) {
ret = -EINVAL;
goto release_lock;
@@ -1072,6 +1072,9 @@ iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
+ if (iavf_is_raw_fdir(fltr))
+ continue;
+
if (cnt == cmd->rule_cnt) {
val = -EMSGSIZE;
goto release_lock;
@@ -1263,15 +1266,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
return -EINVAL;
spin_lock_bh(&adapter->fdir_fltr_lock);
- if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) {
- spin_unlock_bh(&adapter->fdir_fltr_lock);
- dev_err(&adapter->pdev->dev,
- "Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n",
- IAVF_MAX_FDIR_FILTERS);
- return -ENOSPC;
- }
-
- if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) {
+ if (iavf_find_fdir_fltr(adapter, false, fsp->location)) {
dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
spin_unlock_bh(&adapter->fdir_fltr_lock);
return -EEXIST;
@@ -1291,23 +1286,10 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
}
err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
- if (err)
- goto ret;
-
- spin_lock_bh(&adapter->fdir_fltr_lock);
- iavf_fdir_list_add_fltr(adapter, fltr);
- adapter->fdir_active_fltr++;
-
- if (adapter->link_up)
- fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
- else
- fltr->state = IAVF_FDIR_FLTR_INACTIVE;
- spin_unlock_bh(&adapter->fdir_fltr_lock);
+ if (!err)
+ err = iavf_fdir_add_fltr(adapter, fltr);
- if (adapter->link_up)
- iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER);
-ret:
- if (err && fltr)
+ if (err)
kfree(fltr);
mutex_unlock(&adapter->crit_lock);
@@ -1324,34 +1306,11 @@ ret:
static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
- struct iavf_fdir_fltr *fltr = NULL;
- int err = 0;
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
- spin_lock_bh(&adapter->fdir_fltr_lock);
- fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
- if (fltr) {
- if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
- fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
- } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
- list_del(&fltr->list);
- kfree(fltr);
- adapter->fdir_active_fltr--;
- fltr = NULL;
- } else {
- err = -EBUSY;
- }
- } else if (adapter->fdir_active_fltr) {
- err = -EINVAL;
- }
- spin_unlock_bh(&adapter->fdir_fltr_lock);
-
- if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
- iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER);
-
- return err;
+ return iavf_fdir_del_fltr(adapter, false, fsp->location);
}
/**
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
index 2d47b0b4640e..a1b3b44cc14a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
@@ -796,6 +796,9 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(tmp, &adapter->fdir_list_head, list) {
+ if (iavf_is_raw_fdir(fltr))
+ continue;
+
if (tmp->flow_type != fltr->flow_type)
continue;
@@ -815,33 +818,52 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *
}
/**
- * iavf_find_fdir_fltr_by_loc - find filter with location
+ * iavf_find_fdir_fltr - find FDIR filter
* @adapter: pointer to the VF adapter structure
- * @loc: location to find.
+ * @is_raw: true for a raw (tc u32) filter, false for an ethtool filter
+ * @data: value identifying the filter; its meaning depends on @is_raw
*
- * Returns pointer to Flow Director filter if found or null
+ * Returns: pointer to Flow Director filter if found or NULL. Lock must be held.
*/
-struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc)
+struct iavf_fdir_fltr *iavf_find_fdir_fltr(struct iavf_adapter *adapter,
+ bool is_raw, u32 data)
{
struct iavf_fdir_fltr *rule;
- list_for_each_entry(rule, &adapter->fdir_list_head, list)
- if (rule->loc == loc)
+ list_for_each_entry(rule, &adapter->fdir_list_head, list) {
+ if ((is_raw && rule->cls_u32_handle == data) ||
+ (!is_raw && rule->loc == data))
return rule;
+ }
return NULL;
}
/**
- * iavf_fdir_list_add_fltr - add a new node to the flow director filter list
+ * iavf_fdir_add_fltr - add a new node to the flow director filter list
* @adapter: pointer to the VF adapter structure
* @fltr: filter node to add to structure
+ *
+ * Return: 0 on success or negative errno on failure.
*/
-void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
+int iavf_fdir_add_fltr(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr)
{
struct iavf_fdir_fltr *rule, *parent = NULL;
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ if (iavf_fdir_max_reached(adapter)) {
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ dev_err(&adapter->pdev->dev,
+ "Unable to add Flow Director filter (limit (%u) reached)\n",
+ IAVF_MAX_FDIR_FILTERS);
+ return -ENOSPC;
+ }
+
list_for_each_entry(rule, &adapter->fdir_list_head, list) {
+ if (iavf_is_raw_fdir(fltr))
+ break;
+
if (rule->loc >= fltr->loc)
break;
parent = rule;
@@ -851,4 +873,55 @@ void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr
list_add(&fltr->list, &parent->list);
else
list_add(&fltr->list, &adapter->fdir_list_head);
+
+ iavf_inc_fdir_active_fltr(adapter, fltr);
+
+ if (adapter->link_up)
+ fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ else
+ fltr->state = IAVF_FDIR_FLTR_INACTIVE;
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (adapter->link_up)
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER);
+
+ return 0;
+}
+
+/**
+ * iavf_fdir_del_fltr - delete a flow director filter from the list
+ * @adapter: pointer to the VF adapter structure
+ * @is_raw: true for a raw (tc u32) filter, false for an ethtool filter
+ * @data: value identifying the filter; its meaning depends on @is_raw
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+int iavf_fdir_del_fltr(struct iavf_adapter *adapter, bool is_raw, u32 data)
+{
+ struct iavf_fdir_fltr *fltr = NULL;
+ int err = 0;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ fltr = iavf_find_fdir_fltr(adapter, is_raw, data);
+
+ if (fltr) {
+ if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
+ fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
+ list_del(&fltr->list);
+ iavf_dec_fdir_active_fltr(adapter, fltr);
+ kfree(fltr);
+ fltr = NULL;
+ } else {
+ err = -EBUSY;
+ }
+ } else if (adapter->fdir_active_fltr) {
+ err = -EINVAL;
+ }
+
+ if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER);
+
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ return err;
}
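
The delete path above is a small state machine: a filter the PF has installed is only marked for deletion (the PF must confirm it), one that never reached the PF can be freed on the spot, and anything mid-transition is busy. A condensed view of the transitions, assuming the IAVF_FDIR_FLTR_* states used above (example_del_transition is illustrative):

    static int example_del_transition(struct iavf_adapter *adapter,
                                      struct iavf_fdir_fltr *fltr)
    {
            switch (fltr->state) {
            case IAVF_FDIR_FLTR_ACTIVE:
                    /* installed on the PF: request removal, PF confirms */
                    fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
                    return 0;
            case IAVF_FDIR_FLTR_INACTIVE:
                    /* never installed: safe to drop locally */
                    list_del(&fltr->list);
                    iavf_dec_fdir_active_fltr(adapter, fltr);
                    kfree(fltr);
                    return 0;
            default:
                    /* an add or disable is still in flight */
                    return -EBUSY;
            }
    }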
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
index d31bd923ba8c..e84a5351162f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
@@ -117,17 +117,26 @@ struct iavf_fdir_fltr {
u32 flow_id;
+ u32 cls_u32_handle; /* for FDIR added via tc u32 */
u32 loc; /* Rule location inside the flow table */
u32 q_index;
struct virtchnl_fdir_add vc_add_msg;
};
+static inline bool iavf_is_raw_fdir(struct iavf_fdir_fltr *fltr)
+{
+ return !fltr->vc_add_msg.rule_cfg.proto_hdrs.count;
+}
+
int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
struct iavf_fdir_fltr *fltr);
int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
-void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
-struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc);
+int iavf_fdir_add_fltr(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr);
+int iavf_fdir_del_fltr(struct iavf_adapter *adapter, bool is_raw, u32 data);
+struct iavf_fdir_fltr *iavf_find_fdir_fltr(struct iavf_adapter *adapter,
+ bool is_raw, u32 data);
#endif /* _IAVF_FDIR_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index ff11bafb3b4f..f782402cd789 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -4013,7 +4013,7 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter,
/**
* iavf_setup_tc_cls_flower - flower classifier offloads
- * @adapter: board private structure
+ * @adapter: pointer to iavf adapter structure
* @cls_flower: pointer to flow_cls_offload struct with flow info
*/
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
@@ -4032,6 +4032,154 @@ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
}
/**
+ * iavf_add_cls_u32 - Add U32 classifier offloads
+ * @adapter: pointer to iavf adapter structure
+ * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int iavf_add_cls_u32(struct iavf_adapter *adapter,
+ struct tc_cls_u32_offload *cls_u32)
+{
+ struct netlink_ext_ack *extack = cls_u32->common.extack;
+ struct virtchnl_fdir_rule *rule_cfg;
+ struct virtchnl_filter_action *vact;
+ struct virtchnl_proto_hdrs *hdrs;
+ struct ethhdr *spec_h, *mask_h;
+ const struct tc_action *act;
+ struct iavf_fdir_fltr *fltr;
+ struct tcf_exts *exts;
+ unsigned int q_index;
+ int i, status = 0;
+ int off_base = 0;
+
+ if (cls_u32->knode.link_handle) {
+ NL_SET_ERR_MSG_MOD(extack, "Linking not supported");
+ return -EOPNOTSUPP;
+ }
+
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr)
+ return -ENOMEM;
+
+ rule_cfg = &fltr->vc_add_msg.rule_cfg;
+ hdrs = &rule_cfg->proto_hdrs;
+ hdrs->count = 0;
+
+ /* The parser lib at the PF expects the packet to start with a MAC hdr */
+ switch (ntohs(cls_u32->common.protocol)) {
+ case ETH_P_802_3:
+ break;
+ case ETH_P_IP:
+ spec_h = (struct ethhdr *)hdrs->raw.spec;
+ mask_h = (struct ethhdr *)hdrs->raw.mask;
+ spec_h->h_proto = htons(ETH_P_IP);
+ mask_h->h_proto = htons(0xFFFF);
+ off_base += ETH_HLEN;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Only 802_3 and ip filter protocols are supported");
+ status = -EOPNOTSUPP;
+ goto free_alloc;
+ }
+
+ for (i = 0; i < cls_u32->knode.sel->nkeys; i++) {
+ __be32 val, mask;
+ int off;
+
+ off = off_base + cls_u32->knode.sel->keys[i].off;
+ val = cls_u32->knode.sel->keys[i].val;
+ mask = cls_u32->knode.sel->keys[i].mask;
+
+ if (off >= sizeof(hdrs->raw.spec)) {
+ NL_SET_ERR_MSG_MOD(extack, "Input exceeds maximum allowed.");
+ status = -EINVAL;
+ goto free_alloc;
+ }
+
+ memcpy(&hdrs->raw.spec[off], &val, sizeof(val));
+ memcpy(&hdrs->raw.mask[off], &mask, sizeof(mask));
+ hdrs->raw.pkt_len = off + sizeof(val);
+ }
+
+ /* Only one action is allowed */
+ rule_cfg->action_set.count = 1;
+ vact = &rule_cfg->action_set.actions[0];
+ exts = cls_u32->knode.exts;
+
+ tcf_exts_for_each_action(i, act, exts) {
+ /* FDIR queue */
+ if (is_tcf_skbedit_rx_queue_mapping(act)) {
+ q_index = tcf_skbedit_rx_queue_mapping(act);
+ if (q_index >= adapter->num_active_queues) {
+ status = -EINVAL;
+ goto free_alloc;
+ }
+
+ vact->type = VIRTCHNL_ACTION_QUEUE;
+ vact->act_conf.queue.index = q_index;
+ break;
+ }
+
+ /* Drop */
+ if (is_tcf_gact_shot(act)) {
+ vact->type = VIRTCHNL_ACTION_DROP;
+ break;
+ }
+
+ /* Unsupported action */
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action.");
+ status = -EOPNOTSUPP;
+ goto free_alloc;
+ }
+
+ fltr->vc_add_msg.vsi_id = adapter->vsi.id;
+ fltr->cls_u32_handle = cls_u32->knode.handle;
+ return iavf_fdir_add_fltr(adapter, fltr);
+
+free_alloc:
+ kfree(fltr);
+ return status;
+}
+
+/**
+ * iavf_del_cls_u32 - Delete U32 classifier offloads
+ * @adapter: pointer to iavf adapter structure
+ * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int iavf_del_cls_u32(struct iavf_adapter *adapter,
+ struct tc_cls_u32_offload *cls_u32)
+{
+ return iavf_fdir_del_fltr(adapter, true, cls_u32->knode.handle);
+}
+
+/**
+ * iavf_setup_tc_cls_u32 - U32 filter offloads
+ * @adapter: pointer to iavf adapter structure
+ * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int iavf_setup_tc_cls_u32(struct iavf_adapter *adapter,
+ struct tc_cls_u32_offload *cls_u32)
+{
+ if (!TC_U32_SUPPORT(adapter) || !FDIR_FLTR_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ switch (cls_u32->command) {
+ case TC_CLSU32_NEW_KNODE:
+ case TC_CLSU32_REPLACE_KNODE:
+ return iavf_add_cls_u32(adapter, cls_u32);
+ case TC_CLSU32_DELETE_KNODE:
+ return iavf_del_cls_u32(adapter, cls_u32);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
* iavf_setup_tc_block_cb - block callback for tc
* @type: type of offload
* @type_data: offload data
@@ -4050,6 +4198,8 @@ static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
return iavf_setup_tc_cls_flower(cb_priv, type_data);
+ case TC_SETUP_CLSU32:
+ return iavf_setup_tc_cls_u32(cb_priv, type_data);
default:
return -EOPNOTSUPP;
}
@@ -4332,8 +4482,8 @@ static void iavf_disable_fdir(struct iavf_adapter *adapter)
fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
/* Delete filters not registered in PF */
list_del(&fdir->list);
+ iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
- adapter->fdir_active_fltr--;
} else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
@@ -4843,9 +4993,11 @@ int iavf_process_config(struct iavf_adapter *adapter)
/* get HW VLAN features that can be toggled */
hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
- /* Enable cloud filter if ADQ is supported */
- if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
+ /* Enable HW TC offload if ADQ or tc U32 is supported */
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ ||
+ TC_U32_SUPPORT(adapter))
hw_features |= NETIF_F_HW_TC;
+
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
hw_features |= NETIF_F_GSO_UDP_L4;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 1e543f6a7c30..7e810b65380c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -142,6 +142,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
VIRTCHNL_VF_OFFLOAD_ENCAP |
+ VIRTCHNL_VF_OFFLOAD_TC_U32 |
VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
VIRTCHNL_VF_OFFLOAD_CRC |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
@@ -1961,8 +1962,8 @@ static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
* list on PF is already cleared after a reset
*/
list_del(&f->list);
+ iavf_dec_fdir_active_fltr(adapter, f);
kfree(f);
- adapter->fdir_active_fltr--;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -2135,8 +2136,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
dev_err(&adapter->pdev->dev,
"%s\n", msg);
list_del(&fdir->list);
+ iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
- adapter->fdir_active_fltr--;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -2451,8 +2452,12 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list) {
if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
- dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
- fdir->loc);
+ if (!iavf_is_raw_fdir(fdir))
+ dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
+ fdir->loc);
+ else
+ dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is added\n",
+ TC_U32_USERHTID(fdir->cls_u32_handle));
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
fdir->flow_id = add_fltr->flow_id;
} else {
@@ -2460,8 +2465,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
add_fltr->status);
iavf_print_fdir_fltr(adapter, fdir);
list_del(&fdir->list);
+ iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
- adapter->fdir_active_fltr--;
}
}
}
@@ -2479,11 +2484,15 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
del_fltr->status ==
VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
- dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
- fdir->loc);
+ if (!iavf_is_raw_fdir(fdir))
+ dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
+ fdir->loc);
+ else
+ dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is deleted\n",
+ TC_U32_USERHTID(fdir->cls_u32_handle));
list_del(&fdir->list);
+ iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
- adapter->fdir_active_fltr--;
} else {
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 03500e28ac99..3307d551f431 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -28,9 +28,13 @@ ice-y := ice_main.o \
ice_vlan_mode.o \
ice_flex_pipe.o \
ice_flow.o \
+ ice_parser.o \
+ ice_parser_rt.o \
ice_idc.o \
devlink/devlink.o \
devlink/devlink_port.o \
+ ice_sf_eth.o \
+ ice_sf_vsi_vlan_ops.o \
ice_ddp.o \
ice_fw_update.o \
ice_lag.o \
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 810a901d7afd..415445cefdb2 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -6,9 +6,11 @@
#include "ice.h"
#include "ice_lib.h"
#include "devlink.h"
+#include "devlink_port.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"
+#include "ice_sf_eth.h"
/* context for devlink info version reporting */
struct ice_info_ctx {
@@ -744,6 +746,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
struct ice_sched_node *tc_node, struct ice_pf *pf)
{
struct devlink_rate *rate_node = NULL;
+ struct ice_dynamic_port *sf;
struct ice_vf *vf;
int i;
@@ -755,6 +758,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
/* create root node */
rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
} else if (node->vsi_handle &&
+ pf->vsi[node->vsi_handle]->type == ICE_VSI_VF &&
pf->vsi[node->vsi_handle]->vf) {
vf = pf->vsi[node->vsi_handle]->vf;
if (!vf->devlink_port.devlink_rate)
@@ -763,6 +767,16 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
*/
devl_rate_leaf_create(&vf->devlink_port, node,
node->parent->rate_node);
+ } else if (node->vsi_handle &&
+ pf->vsi[node->vsi_handle]->type == ICE_VSI_SF &&
+ pf->vsi[node->vsi_handle]->sf) {
+ sf = pf->vsi[node->vsi_handle]->sf;
+ if (!sf->devlink_port.devlink_rate)
+ /* leaf nodes don't have children,
+ * so we don't set rate_node
+ */
+ devl_rate_leaf_create(&sf->devlink_port, node,
+ node->parent->rate_node);
} else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF &&
node->parent->rate_node) {
rate_node = devl_rate_node_create(devlink, node, node->name,
@@ -1277,8 +1291,12 @@ static const struct devlink_ops ice_devlink_ops = {
.rate_leaf_parent_set = ice_devlink_set_parent,
.rate_node_parent_set = ice_devlink_set_parent,
+
+ .port_new = ice_devlink_port_new,
};
+static const struct devlink_ops ice_sf_devlink_ops;
+
static int
ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
@@ -1562,6 +1580,34 @@ struct ice_pf *ice_allocate_pf(struct device *dev)
}
/**
+ * ice_allocate_sf - Allocate devlink and return SF structure pointer
+ * @dev: the device to allocate for
+ * @pf: pointer to the PF structure
+ *
+ * Allocate a devlink instance for SF.
+ *
+ * Return: ice_sf_priv pointer to allocated memory or ERR_PTR in case of error
+ */
+struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf)
+{
+ struct devlink *devlink;
+ int err;
+
+ devlink = devlink_alloc(&ice_sf_devlink_ops, sizeof(struct ice_sf_priv),
+ dev);
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
+
+ err = devl_nested_devlink_set(priv_to_devlink(pf), devlink);
+ if (err) {
+ devlink_free(devlink);
+ return ERR_PTR(err);
+ }
+
+ return devlink_priv(devlink);
+}
+
+/**
* ice_devlink_register - Register devlink interface for this PF
* @pf: the PF to register the devlink for.
*
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.h b/drivers/net/ethernet/intel/ice/devlink/devlink.h
index d291c0e2e17b..1af3b0763fbb 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.h
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.h
@@ -5,6 +5,7 @@
#define _ICE_DEVLINK_H_
struct ice_pf *ice_allocate_pf(struct device *dev);
+struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf);
void ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
index 00fed5a61d62..928c8bdb6649 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
@@ -5,6 +5,9 @@
#include "ice.h"
#include "devlink.h"
+#include "devlink_port.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
static int ice_active_port_option = -1;
@@ -337,7 +340,7 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
return -EIO;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = pf->hw.bus.func;
+ attrs.phys.port_number = pf->hw.pf_id;
/* As FW supports only port split options for whole device,
* set port split options only for first PF.
@@ -455,7 +458,7 @@ int ice_devlink_create_vf_port(struct ice_vf *vf)
return -EINVAL;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
- attrs.pci_vf.pf = pf->hw.bus.func;
+ attrs.pci_vf.pf = pf->hw.pf_id;
attrs.pci_vf.vf = vf->vf_id;
ice_devlink_set_switch_id(pf, &attrs.switch_id);
@@ -485,3 +488,506 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf)
devl_rate_leaf_destroy(&vf->devlink_port);
devl_port_unregister(&vf->devlink_port);
}
+
+/**
+ * ice_devlink_create_sf_dev_port - Register virtual port for a subfunction
+ * @sf_dev: the subfunction device to create a devlink port for
+ *
+ * Register virtual flavour devlink port for the subfunction auxiliary device
+ * created after activating a dynamically added devlink port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_sf_dev_port(struct ice_sf_dev *sf_dev)
+{
+ struct devlink_port_attrs attrs = {};
+ struct ice_dynamic_port *dyn_port;
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+
+ dyn_port = sf_dev->dyn_port;
+ vsi = dyn_port->vsi;
+
+ devlink_port = &sf_dev->priv->devlink_port;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(sf_dev->priv);
+
+ return devl_port_register(devlink, devlink_port, vsi->idx);
+}
+
+/**
+ * ice_devlink_destroy_sf_dev_port - Destroy virtual port for a subfunction
+ * @sf_dev: the subfunction device to destroy the devlink port for
+ *
+ * Unregisters the virtual port associated with this subfunction.
+ */
+void ice_devlink_destroy_sf_dev_port(struct ice_sf_dev *sf_dev)
+{
+ devl_port_unregister(&sf_dev->priv->devlink_port);
+}
+
+/**
+ * ice_activate_dynamic_port - Activate a dynamic port
+ * @dyn_port: dynamic port instance to activate
+ * @extack: extack for reporting error messages
+ *
+ * Activate the dynamic port based on its flavour.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_activate_dynamic_port(struct ice_dynamic_port *dyn_port,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (dyn_port->active)
+ return 0;
+
+ err = ice_sf_eth_activate(dyn_port, extack);
+ if (err)
+ return err;
+
+ dyn_port->active = true;
+
+ return 0;
+}
+
+/**
+ * ice_deactivate_dynamic_port - Deactivate a dynamic port
+ * @dyn_port: dynamic port instance to deactivate
+ *
+ * Undo activation of a dynamic port.
+ */
+static void ice_deactivate_dynamic_port(struct ice_dynamic_port *dyn_port)
+{
+ if (!dyn_port->active)
+ return;
+
+ ice_sf_eth_deactivate(dyn_port);
+ dyn_port->active = false;
+}
+
+/**
+ * ice_dealloc_dynamic_port - Deallocate and remove a dynamic port
+ * @dyn_port: dynamic port instance to deallocate
+ *
+ * Free resources associated with a dynamically added devlink port. Will
+ * deactivate the port if it is currently active.
+ */
+static void ice_dealloc_dynamic_port(struct ice_dynamic_port *dyn_port)
+{
+ struct devlink_port *devlink_port = &dyn_port->devlink_port;
+ struct ice_pf *pf = dyn_port->pf;
+
+ ice_deactivate_dynamic_port(dyn_port);
+
+ xa_erase(&pf->sf_nums, devlink_port->attrs.pci_sf.sf);
+ ice_eswitch_detach_sf(pf, dyn_port);
+ xa_erase(&pf->dyn_ports, dyn_port->vsi->idx);
+ ice_vsi_free(dyn_port->vsi);
+ kfree(dyn_port);
+}
+
+/**
+ * ice_dealloc_all_dynamic_ports - Deallocate all dynamic devlink ports
+ * @pf: pointer to the pf structure
+ */
+void ice_dealloc_all_dynamic_ports(struct ice_pf *pf)
+{
+ struct ice_dynamic_port *dyn_port;
+ unsigned long index;
+
+ xa_for_each(&pf->dyn_ports, index, dyn_port)
+ ice_dealloc_dynamic_port(dyn_port);
+}
+
+/**
+ * ice_devlink_port_new_check_attr - Check that new port attributes are valid
+ * @pf: pointer to the PF structure
+ * @new_attr: the attributes for the new port
+ * @extack: extack for reporting error messages
+ *
+ * Check that the attributes for the new port are valid before continuing to
+ * allocate the devlink port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_new_check_attr(struct ice_pf *pf,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack)
+{
+ if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) {
+ NL_SET_ERR_MSG_MOD(extack, "Flavour other than pcisf is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (new_attr->controller_valid) {
+ NL_SET_ERR_MSG_MOD(extack, "Setting controller is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (new_attr->port_index_valid) {
+ NL_SET_ERR_MSG_MOD(extack, "Driver does not support user defined port index assignment");
+ return -EOPNOTSUPP;
+ }
+
+ if (new_attr->pfnum != pf->hw.pf_id) {
+ NL_SET_ERR_MSG_MOD(extack, "Incorrect pfnum supplied");
+ return -EINVAL;
+ }
+
+ if (!pci_msix_can_alloc_dyn(pf->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Dynamic MSIX-X interrupt allocation is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
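For illustration, a port-new request that would pass all of the checks above could be described by an attribute set like this sketch; the field names are real devlink_port_new_attrs members, the values are arbitrary, and pfnum must match pf->hw.pf_id:

    struct devlink_port_new_attrs attrs = {
            .flavour = DEVLINK_PORT_FLAVOUR_PCI_SF,
            .pfnum = 0,
            .sfnum = 1,
            .sfnum_valid = true,
    };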
+
+/**
+ * ice_devlink_port_del - devlink handler for port delete
+ * @devlink: pointer to devlink
+ * @port: devlink port to be deleted
+ * @extack: pointer to extack
+ *
+ * Deletes devlink port and deallocates all resources associated with
+ * created subfunction.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_del(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+ ice_dealloc_dynamic_port(dyn_port);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_hw_addr_set - devlink handler for mac address set
+ * @port: pointer to devlink port
+ * @hw_addr: hw address to set
+ * @hw_addr_len: hw address length
+ * @extack: extack for reporting error messages
+ *
+ * Sets mac address for the port, verifies arguments and copies address
+ * to the subfunction structure.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_hw_addr_set(struct devlink_port *port, const u8 *hw_addr,
+ int hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ if (dyn_port->attached) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ethernet address can be change only in detached state");
+ return -EBUSY;
+ }
+
+ if (hw_addr_len != ETH_ALEN || !is_valid_ether_addr(hw_addr)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid ethernet address");
+ return -EADDRNOTAVAIL;
+ }
+
+ ether_addr_copy(dyn_port->hw_addr, hw_addr);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_hw_addr_get - devlink handler for mac address get
+ * @port: pointer to devlink port
+ * @hw_addr: buffer to receive the hw address
+ * @hw_addr_len: hw address length
+ * @extack: extack for reporting error messages
+ *
+ * Returns mac address for the port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_hw_addr_get(struct devlink_port *port, u8 *hw_addr,
+ int *hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ ether_addr_copy(hw_addr, dyn_port->hw_addr);
+ *hw_addr_len = ETH_ALEN;
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_state_set - devlink handler for port state set
+ * @port: pointer to devlink port
+ * @state: state to set
+ * @extack: extack for reporting error messages
+ *
+ * Activates or deactivates the port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_state_set(struct devlink_port *port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ switch (state) {
+ case DEVLINK_PORT_FN_STATE_ACTIVE:
+ return ice_activate_dynamic_port(dyn_port, extack);
+
+ case DEVLINK_PORT_FN_STATE_INACTIVE:
+ ice_deactivate_dynamic_port(dyn_port);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_state_get - devlink handler for port state get
+ * @port: pointer to devlink port
+ * @state: admin configured state of the port
+ * @opstate: current port operational state
+ * @extack: extack for reporting error messages
+ *
+ * Gets port state.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_state_get(struct devlink_port *port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ if (dyn_port->active)
+ *state = DEVLINK_PORT_FN_STATE_ACTIVE;
+ else
+ *state = DEVLINK_PORT_FN_STATE_INACTIVE;
+
+ if (dyn_port->attached)
+ *opstate = DEVLINK_PORT_FN_OPSTATE_ATTACHED;
+ else
+ *opstate = DEVLINK_PORT_FN_OPSTATE_DETACHED;
+
+ return 0;
+}
+
+static const struct devlink_port_ops ice_devlink_port_sf_ops = {
+ .port_del = ice_devlink_port_del,
+ .port_fn_hw_addr_get = ice_devlink_port_fn_hw_addr_get,
+ .port_fn_hw_addr_set = ice_devlink_port_fn_hw_addr_set,
+ .port_fn_state_get = ice_devlink_port_fn_state_get,
+ .port_fn_state_set = ice_devlink_port_fn_state_set,
+};
+
+/**
+ * ice_reserve_sf_num - Reserve a subfunction number for this port
+ * @pf: pointer to the pf structure
+ * @new_attr: devlink port attributes requested
+ * @extack: extack for reporting error messages
+ * @sfnum: on success, the sf number reserved
+ *
+ * Reserve a subfunction number for this port. Only called for
+ * DEVLINK_PORT_FLAVOUR_PCI_SF ports.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_reserve_sf_num(struct ice_pf *pf,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack, u32 *sfnum)
+{
+ int err;
+
+ /* If user didn't request an explicit number, pick one */
+ if (!new_attr->sfnum_valid)
+ return xa_alloc(&pf->sf_nums, sfnum, NULL, xa_limit_32b,
+ GFP_KERNEL);
+
+ /* Otherwise, check and use the number provided */
+ err = xa_insert(&pf->sf_nums, new_attr->sfnum, NULL, GFP_KERNEL);
+ if (err) {
+ if (err == -EBUSY)
+ NL_SET_ERR_MSG_MOD(extack, "Subfunction with given sfnum already exists");
+ return err;
+ }
+
+ *sfnum = new_attr->sfnum;
+
+ return 0;
+}
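The reservation leans on xarray semantics: xa_alloc() picks the next free index, while xa_insert() of a NULL entry pins a specific index and fails with -EBUSY if it is already taken. A self-contained sketch of the same pattern, with illustrative names only:

    #include <linux/xarray.h>

    static DEFINE_XARRAY_ALLOC(example_nums);

    /* Reserve either a caller-chosen id or the next free one. */
    static int example_reserve(bool want_specific, u32 requested, u32 *out)
    {
            int err;

            if (!want_specific)
                    return xa_alloc(&example_nums, out, NULL,
                                    xa_limit_32b, GFP_KERNEL);

            err = xa_insert(&example_nums, requested, NULL, GFP_KERNEL);
            if (err)
                    return err; /* -EBUSY if the id is taken */

            *out = requested;
            return 0;
    }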
+
+/**
+ * ice_devlink_create_sf_port - Register PCI subfunction devlink port
+ * @dyn_port: the dynamic port instance structure for this subfunction
+ *
+ * Register PCI subfunction flavour devlink port for a dynamically added
+ * subfunction port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_sf_port(struct ice_dynamic_port *dyn_port)
+{
+ struct devlink_port_attrs attrs = {};
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+ struct ice_pf *pf;
+
+ vsi = dyn_port->vsi;
+ pf = dyn_port->pf;
+
+ devlink_port = &dyn_port->devlink_port;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_SF;
+ attrs.pci_sf.pf = pf->hw.pf_id;
+ attrs.pci_sf.sf = dyn_port->sfnum;
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(pf);
+
+ return devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_port_sf_ops);
+}
+
+/**
+ * ice_devlink_destroy_sf_port - Destroy the devlink_port for this SF
+ * @dyn_port: the dynamic port instance structure for this subfunction
+ *
+ * Unregisters the devlink_port structure associated with this SF.
+ */
+void ice_devlink_destroy_sf_port(struct ice_dynamic_port *dyn_port)
+{
+ devl_rate_leaf_destroy(&dyn_port->devlink_port);
+ devl_port_unregister(&dyn_port->devlink_port);
+}
+
+/**
+ * ice_alloc_dynamic_port - Allocate new dynamic port
+ * @pf: pointer to the pf structure
+ * @new_attr: devlink port attributes requested
+ * @extack: extack for reporting error messages
+ * @devlink_port: on success, set to the newly created devlink port
+ *
+ * Allocate a new dynamic port instance and prepare it for configuration
+ * with devlink.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_alloc_dynamic_port(struct ice_pf *pf,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ struct devlink_port **devlink_port)
+{
+ struct ice_dynamic_port *dyn_port;
+ struct ice_vsi *vsi;
+ u32 sfnum;
+ int err;
+
+ err = ice_reserve_sf_num(pf, new_attr, extack, &sfnum);
+ if (err)
+ return err;
+
+ dyn_port = kzalloc(sizeof(*dyn_port), GFP_KERNEL);
+ if (!dyn_port) {
+ err = -ENOMEM;
+ goto unroll_reserve_sf_num;
+ }
+
+ vsi = ice_vsi_alloc(pf);
+ if (!vsi) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VSI");
+ err = -ENOMEM;
+ goto unroll_dyn_port_alloc;
+ }
+
+ dyn_port->vsi = vsi;
+ dyn_port->pf = pf;
+ dyn_port->sfnum = sfnum;
+ eth_random_addr(dyn_port->hw_addr);
+
+ err = xa_insert(&pf->dyn_ports, vsi->idx, dyn_port, GFP_KERNEL);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Port index reservation failed");
+ goto unroll_vsi_alloc;
+ }
+
+ err = ice_eswitch_attach_sf(pf, dyn_port);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to attach SF to eswitch");
+ goto unroll_xa_insert;
+ }
+
+ *devlink_port = &dyn_port->devlink_port;
+
+ return 0;
+
+unroll_xa_insert:
+ xa_erase(&pf->dyn_ports, vsi->idx);
+unroll_vsi_alloc:
+ ice_vsi_free(vsi);
+unroll_dyn_port_alloc:
+ kfree(dyn_port);
+unroll_reserve_sf_num:
+ xa_erase(&pf->sf_nums, sfnum);
+
+ return err;
+}
+
+/**
+ * ice_devlink_port_new - devlink handler for the new port
+ * @devlink: pointer to devlink
+ * @new_attr: pointer to the port new attributes
+ * @extack: extack for reporting error messages
+ * @devlink_port: pointer to a new port
+ *
+ * Creates a new devlink port, checks the new port attributes, rejects
+ * any unsupported parameters and allocates a new subfunction for that port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int
+ice_devlink_port_new(struct devlink *devlink,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ struct devlink_port **devlink_port)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ int err;
+
+ err = ice_devlink_port_new_check_attr(pf, new_attr, extack);
+ if (err)
+ return err;
+
+ return ice_alloc_dynamic_port(pf, new_attr, extack, devlink_port);
+}
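This handler is reached through the devlink core when userspace issues a port add request. A hedged sketch of the wiring; the ops structure name below is illustrative, while .port_new is the real devlink_ops member:

    static const struct devlink_ops example_devlink_ops = {
            .port_new = ice_devlink_port_new,
    };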
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
index 9223bcdb6444..d60efc340945 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
@@ -4,9 +4,55 @@
#ifndef _DEVLINK_PORT_H_
#define _DEVLINK_PORT_H_
+#include "../ice.h"
+#include "../ice_sf_eth.h"
+
+/**
+ * struct ice_dynamic_port - Track dynamically added devlink port instance
+ * @hw_addr: the HW address for this port
+ * @active: true if the port has been activated
+ * @attached: true it the prot is attached
+ * @devlink_port: the associated devlink port structure
+ * @pf: pointer to the PF private structure
+ * @vsi: the VSI associated with this port
+ * @repr_id: the representor ID
+ * @sfnum: the subfunction ID
+ * @sf_dev: pointer to the subfunction device
+ *
+ * An instance of a dynamically added devlink port. Each port flavour
+ * keeps its implementation-specific data in the union at the end of
+ * the structure.
+ */
+struct ice_dynamic_port {
+ u8 hw_addr[ETH_ALEN];
+ u8 active: 1;
+ u8 attached: 1;
+ struct devlink_port devlink_port;
+ struct ice_pf *pf;
+ struct ice_vsi *vsi;
+ unsigned long repr_id;
+ u32 sfnum;
+ /* Flavour-specific implementation data */
+ union {
+ struct ice_sf_dev *sf_dev;
+ };
+};
+
+void ice_dealloc_all_dynamic_ports(struct ice_pf *pf);
+
int ice_devlink_create_pf_port(struct ice_pf *pf);
void ice_devlink_destroy_pf_port(struct ice_pf *pf);
int ice_devlink_create_vf_port(struct ice_vf *vf);
void ice_devlink_destroy_vf_port(struct ice_vf *vf);
+int ice_devlink_create_sf_port(struct ice_dynamic_port *dyn_port);
+void ice_devlink_destroy_sf_port(struct ice_dynamic_port *dyn_port);
+int ice_devlink_create_sf_dev_port(struct ice_sf_dev *sf_dev);
+void ice_devlink_destroy_sf_dev_port(struct ice_sf_dev *sf_dev);
+
+#define ice_devlink_port_to_dyn(port) \
+ container_of(port, struct ice_dynamic_port, devlink_port)
+int
+ice_devlink_port_new(struct devlink *devlink,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ struct devlink_port **devlink_port);
#endif /* _DEVLINK_PORT_H_ */
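ice_devlink_port_to_dyn() is the usual container_of() pattern: recover the wrapping structure from a pointer to an embedded member. A standalone user-space sketch of the same arithmetic, for illustration only:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct port { int index; };
    struct dyn { int sfnum; struct port port; };

    int main(void)
    {
            struct dyn d = { .sfnum = 7 };
            struct port *p = &d.port;

            /* Recover the enclosing struct from the embedded member. */
            struct dyn *back = container_of(p, struct dyn, port);

            printf("%d\n", back->sfnum); /* prints 7 */
            return 0;
    }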
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index caaa10157909..d6f80da30dec 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -318,6 +318,7 @@ enum ice_vsi_state {
ICE_VSI_UMAC_FLTR_CHANGED,
ICE_VSI_MMAC_FLTR_CHANGED,
ICE_VSI_PROMISC_CHANGED,
+ ICE_VSI_REBUILD_PENDING,
ICE_VSI_STATE_NBITS /* must be last */
};
@@ -411,6 +412,7 @@ struct ice_vsi {
struct ice_tx_ring **xdp_rings; /* XDP ring array */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+ struct mutex xdp_state_lock;
struct net_device **target_netdevs;
@@ -449,7 +451,12 @@ struct ice_vsi {
struct_group_tagged(ice_vsi_cfg_params, params,
struct ice_port_info *port_info; /* back pointer to port_info */
struct ice_channel *ch; /* VSI's channel structure, may be NULL */
- struct ice_vf *vf; /* VF associated with this VSI, may be NULL */
+ union {
+ /* VF associated with this VSI, may be NULL */
+ struct ice_vf *vf;
+ /* SF associated with this VSI, may be NULL */
+ struct ice_dynamic_port *sf;
+ };
u32 flags; /* VSI flags used for rebuild and configuration */
enum ice_vsi_type type; /* the type of the VSI */
);
@@ -650,6 +657,9 @@ struct ice_pf {
struct ice_eswitch eswitch;
struct ice_esw_br_port *br_port;
+ struct xarray dyn_ports;
+ struct xarray sf_nums;
+
#define ICE_INVALID_AGG_NODE_ID 0
#define ICE_PF_AGG_NODE_ID_START 1
#define ICE_MAX_PF_AGG_NODES 32
@@ -916,6 +926,7 @@ int ice_vsi_open(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
+void ice_set_ethtool_sf_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
@@ -1001,6 +1012,14 @@ void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);
int ice_init_dev(struct ice_pf *pf);
void ice_deinit_dev(struct ice_pf *pf);
+int ice_change_mtu(struct net_device *netdev, int new_mtu);
+void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue);
+int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+void ice_set_netdev_features(struct net_device *netdev);
+int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
+int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
+void ice_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
/**
* ice_set_rdma_cap - enable RDMA support
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 66f02988d549..0be1a98d7cc1 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -2632,12 +2632,16 @@ struct ice_aq_desc {
/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
#define ICE_AQ_LG_BUF 512
+#define ICE_AQ_FLAG_DD_S 0
+#define ICE_AQ_FLAG_CMP_S 1
#define ICE_AQ_FLAG_ERR_S 2
#define ICE_AQ_FLAG_LB_S 9
#define ICE_AQ_FLAG_RD_S 10
#define ICE_AQ_FLAG_BUF_S 12
#define ICE_AQ_FLAG_SI_S 13
+#define ICE_AQ_FLAG_DD BIT(ICE_AQ_FLAG_DD_S) /* 0x1 */
+#define ICE_AQ_FLAG_CMP BIT(ICE_AQ_FLAG_CMP_S) /* 0x2 */
#define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */
#define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */
#define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 1facf179a96f..4a9a6899fc45 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -190,16 +190,11 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
}
q_vector = vsi->q_vectors[v_idx];
- ice_for_each_tx_ring(tx_ring, q_vector->tx) {
- ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX,
- NULL);
+ ice_for_each_tx_ring(tx_ring, vsi->q_vectors[v_idx]->tx)
tx_ring->q_vector = NULL;
- }
- ice_for_each_rx_ring(rx_ring, q_vector->rx) {
- ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX,
- NULL);
+
+ ice_for_each_rx_ring(rx_ring, vsi->q_vectors[v_idx]->rx)
rx_ring->q_vector = NULL;
- }
/* only VSI with an associated netdev is set up with NAPI */
if (vsi->netdev)
@@ -330,6 +325,9 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
+ case ICE_VSI_SF:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ break;
default:
return;
}
@@ -513,6 +511,25 @@ static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
}
/**
+ * ice_get_frame_sz - calculate xdp_buff::frame_sz
+ * @rx_ring: the ring being configured
+ *
+ * Return frame size based on underlying PAGE_SIZE
+ */
+static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring)
+{
+ unsigned int frame_sz;
+
+#if (PAGE_SIZE >= 8192)
+ frame_sz = rx_ring->rx_buf_len;
+#else
+ frame_sz = ice_rx_pg_size(rx_ring) / 2;
+#endif
+
+ return frame_sz;
+}
+
+/**
* ice_vsi_cfg_rxq - Configure an Rx queue
* @ring: the ring being configured
*
@@ -526,7 +543,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
ring->rx_buf_len = ring->vsi->rx_buf_len;
- if (ring->vsi->type == ICE_VSI_PF) {
+ if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
@@ -576,7 +593,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
}
}
- xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
+ xdp_init_buff(&ring->xdp, ice_get_frame_sz(ring), &ring->xdp_rxq);
ring->xdp.data = NULL;
ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
err = ice_setup_rx_ctx(ring);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 66f29bac783a..27208a60cece 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -10,6 +10,7 @@
#include "ice_type.h"
#include "ice_nvm.h"
#include "ice_flex_pipe.h"
+#include "ice_parser.h"
#include <linux/avf/virtchnl.h>
#include "ice_switch.h"
#include "ice_fdir.h"
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index ffaa6511c455..e3959ad442a2 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -99,17 +99,6 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
return -ENOMEM;
cq->sq.desc_buf.size = size;
- cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
- sizeof(struct ice_sq_cd), GFP_KERNEL);
- if (!cq->sq.cmd_buf) {
- dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
- cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
- cq->sq.desc_buf.va = NULL;
- cq->sq.desc_buf.pa = 0;
- cq->sq.desc_buf.size = 0;
- return -ENOMEM;
- }
-
return 0;
}
@@ -188,7 +177,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
if (cq->rq_buf_size > ICE_AQ_LG_BUF)
desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
desc->opcode = 0;
- /* This is in accordance with Admin queue design, there is no
+ /* This is in accordance with control queue design, there is no
* register for buffer size configuration
*/
desc->datalen = cpu_to_le16(bi->size);
@@ -338,8 +327,6 @@ do { \
(qi)->ring.r.ring##_bi[i].size = 0;\
} \
} \
- /* free the buffer info list */ \
- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
/* free DMA head */ \
devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
} while (0)
@@ -405,11 +392,11 @@ init_ctrlq_exit:
}
/**
- * ice_init_rq - initialize ARQ
+ * ice_init_rq - initialize receive side of a control queue
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
- * The main initialization routine for the Admin Receive (Event) Queue.
+ * The main initialization routine for the receive side of a control queue.
* Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure:
* - cq->num_rq_entries
@@ -465,7 +452,7 @@ init_ctrlq_exit:
}
/**
- * ice_shutdown_sq - shutdown the Control ATQ
+ * ice_shutdown_sq - shutdown the transmit side of a control queue
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
@@ -482,7 +469,7 @@ static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto shutdown_sq_out;
}
- /* Stop firmware AdminQ processing */
+ /* Stop processing of the control queue */
wr32(hw, cq->sq.head, 0);
wr32(hw, cq->sq.tail, 0);
wr32(hw, cq->sq.len, 0);
@@ -501,7 +488,7 @@ shutdown_sq_out:
}
/**
- * ice_aq_ver_check - Check the reported AQ API version.
+ * ice_aq_ver_check - Check the reported AQ API version
* @hw: pointer to the hardware structure
*
* Checks if the driver should load on a given AQ API version.
@@ -521,14 +508,20 @@ static bool ice_aq_ver_check(struct ice_hw *hw)
} else if (hw->api_maj_ver == exp_fw_api_ver_major) {
if (hw->api_min_ver > (exp_fw_api_ver_minor + 2))
dev_info(ice_hw_to_dev(hw),
- "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+ "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor)
dev_info(ice_hw_to_dev(hw),
- "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
} else {
/* Major API version is older than expected, log a warning */
dev_info(ice_hw_to_dev(hw),
- "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
}
return true;
}
@@ -855,7 +848,7 @@ void ice_destroy_all_ctrlq(struct ice_hw *hw)
}
/**
- * ice_clean_sq - cleans Admin send queue (ATQ)
+ * ice_clean_sq - cleans the send side of a control queue
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
@@ -865,21 +858,17 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
struct ice_ctl_q_ring *sq = &cq->sq;
u16 ntc = sq->next_to_clean;
- struct ice_sq_cd *details;
struct ice_aq_desc *desc;
desc = ICE_CTL_Q_DESC(*sq, ntc);
- details = ICE_CTL_Q_DETAILS(*sq, ntc);
while (rd32(hw, cq->sq.head) != ntc) {
ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
memset(desc, 0, sizeof(*desc));
- memset(details, 0, sizeof(*details));
ntc++;
if (ntc == sq->count)
ntc = 0;
desc = ICE_CTL_Q_DESC(*sq, ntc);
- details = ICE_CTL_Q_DETAILS(*sq, ntc);
}
sq->next_to_clean = ntc;
@@ -888,18 +877,43 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
}
/**
+ * ice_ctl_q_str - Convert control queue type to string
+ * @qtype: the control queue type
+ *
+ * Return: A string name for the given control queue type.
+ */
+static const char *ice_ctl_q_str(enum ice_ctl_q qtype)
+{
+ switch (qtype) {
+ case ICE_CTL_Q_UNKNOWN:
+ return "Unknown CQ";
+ case ICE_CTL_Q_ADMIN:
+ return "AQ";
+ case ICE_CTL_Q_MAILBOX:
+ return "MBXQ";
+ case ICE_CTL_Q_SB:
+ return "SBQ";
+ default:
+ return "Unrecognized CQ";
+ }
+}
+
+/**
* ice_debug_cq
* @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
* @desc: pointer to control queue descriptor
* @buf: pointer to command buffer
* @buf_len: max length of buf
+ * @response: true if this is the writeback response
*
* Dumps debug log about control command with descriptor contents.
*/
-static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
+static void ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ void *desc, void *buf, u16 buf_len, bool response)
{
struct ice_aq_desc *cq_desc = desc;
- u16 len;
+ u16 datalen, flags;
if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
@@ -908,48 +922,63 @@ static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
if (!desc)
return;
- len = le16_to_cpu(cq_desc->datalen);
+ datalen = le16_to_cpu(cq_desc->datalen);
+ flags = le16_to_cpu(cq_desc->flags);
- ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
- le16_to_cpu(cq_desc->opcode),
- le16_to_cpu(cq_desc->flags),
- le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
- ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ ice_debug(hw, ICE_DBG_AQ_DESC, "%s %s: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n\tcookie (h,l) 0x%08X 0x%08X\n\tparam (0,1) 0x%08X 0x%08X\n\taddr (h,l) 0x%08X 0x%08X\n",
+ ice_ctl_q_str(cq->qtype), response ? "Response" : "Command",
+ le16_to_cpu(cq_desc->opcode), flags, datalen,
+ le16_to_cpu(cq_desc->retval),
le32_to_cpu(cq_desc->cookie_high),
- le32_to_cpu(cq_desc->cookie_low));
- ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->cookie_low),
le32_to_cpu(cq_desc->params.generic.param0),
- le32_to_cpu(cq_desc->params.generic.param1));
- ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->params.generic.param1),
le32_to_cpu(cq_desc->params.generic.addr_high),
le32_to_cpu(cq_desc->params.generic.addr_low));
- if (buf && cq_desc->datalen != 0) {
- ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
- if (buf_len < len)
- len = buf_len;
-
- ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len);
+ /* Dump the buffer iff 1) one exists and 2) it is either a response,
+ * as indicated by the DD and/or CMP flag set, or a command with the
+ * RD flag set.
+ */
+ if (buf && cq_desc->datalen &&
+ (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP | ICE_AQ_FLAG_RD))) {
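+ /* The placeholder literal sizes the buffer; the sprintf() below
+ * overwrites it in place with the real DMA address words.
+ */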
+ char prefix[] = KBUILD_MODNAME " 0x12341234 0x12341234 ";
+
+ sprintf(prefix, KBUILD_MODNAME " 0x%08X 0x%08X ",
+ le32_to_cpu(cq_desc->params.generic.addr_high),
+ le32_to_cpu(cq_desc->params.generic.addr_low));
+ ice_debug_array_w_prefix(hw, ICE_DBG_AQ_DESC_BUF, prefix,
+ buf,
+ min_t(u16, buf_len, datalen));
}
}
/**
- * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
+ * ice_sq_done - poll until the last send on a control queue has completed
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
*
- * Returns true if the firmware has processed all descriptors on the
- * admin send queue. Returns false if there are still requests pending.
+ * Use read_poll_timeout to poll the control queue head, checking until it
+ * matches next_to_use. According to the control queue designers, this has
+ * better timing reliability than the DD bit.
+ *
+ * Return: true if all the descriptors on the send side of a control queue
+ * are finished processing, false otherwise.
*/
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- /* AQ designers suggest use of head for better
- * timing reliability than DD bit
+ u32 head;
+
+ /* Wait a short time before the initial check, to allow hardware time
+ * for completion.
*/
- return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
+ udelay(5);
+
+ return !rd32_poll_timeout(hw, cq->sq.head,
+ head, head == cq->sq.next_to_use,
+ 20, ICE_CTL_Q_SQ_CMD_TIMEOUT);
}
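rd32_poll_timeout() is presumably a thin wrapper over the kernel's read_poll_timeout() from <linux/iopoll.h>; a hedged sketch of such a wrapper is below. Note that ICE_CTL_Q_SQ_CMD_TIMEOUT is now expressed in microseconds (USEC_PER_SEC) rather than jiffies to suit this API.

    #include <linux/iopoll.h>

    /* Sketch only: poll rd32(hw, reg) until cond is true, sleeping
     * sleep_us between reads, for at most timeout_us microseconds.
     */
    #define rd32_poll_timeout(hw, reg, val, cond, sleep_us, timeout_us) \
            read_poll_timeout(rd32, val, cond, sleep_us, timeout_us, \
                              false, (hw), (reg))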
/**
- * ice_sq_send_cmd - send command to Control Queue (ATQ)
+ * ice_sq_send_cmd - send command to a control queue
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
* @desc: prefilled descriptor describing the command
@@ -957,8 +986,9 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @buf_size: size of buffer for indirect commands (or 0 for direct commands)
* @cd: pointer to command details structure
*
- * This is the main send command routine for the ATQ. It runs the queue,
- * cleans the queue, etc.
+ * The main send routine for the transmit side of a control queue. It puts the command
+ * on the queue, bumps the tail, waits for processing of the command, captures
+ * command status and results, etc.
*/
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
@@ -968,8 +998,6 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_dma_mem *dma_buf = NULL;
struct ice_aq_desc *desc_on_ring;
bool cmd_completed = false;
- struct ice_sq_cd *details;
- unsigned long timeout;
int status = 0;
u16 retval = 0;
u32 val = 0;
@@ -1013,12 +1041,6 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
goto sq_send_command_error;
}
- details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
- if (cd)
- *details = *cd;
- else
- memset(details, 0, sizeof(*details));
-
/* Call clean and check queue available function to reclaim the
* descriptors that were processed by FW/MBX; the function returns the
* number of desc available. The clean function called here could be
@@ -1055,7 +1077,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
/* Debug desc and buffer */
ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
- ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
+ ice_debug_cq(hw, cq, (void *)desc_on_ring, buf, buf_size, false);
(cq->sq.next_to_use)++;
if (cq->sq.next_to_use == cq->sq.count)
@@ -1063,20 +1085,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
wr32(hw, cq->sq.tail, cq->sq.next_to_use);
ice_flush(hw);
- /* Wait a short time before initial ice_sq_done() check, to allow
- * hardware time for completion.
+ /* Wait for the command to complete. If it finishes within the
+ * timeout, copy the descriptor back to temp.
*/
- udelay(5);
-
- timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT;
- do {
- if (ice_sq_done(hw, cq))
- break;
-
- usleep_range(100, 150);
- } while (time_before(jiffies, timeout));
-
- /* if ready, copy the desc back to temp */
if (ice_sq_done(hw, cq)) {
memcpy(desc, desc_on_ring, sizeof(*desc));
if (buf) {
@@ -1108,12 +1119,11 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
- ice_debug_cq(hw, (void *)desc, buf, buf_size);
+ ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true);
/* save writeback AQ if requested */
- if (details->wb_desc)
- memcpy(details->wb_desc, desc_on_ring,
- sizeof(*details->wb_desc));
+ if (cd && cd->wb_desc)
+ memcpy(cd->wb_desc, desc_on_ring, sizeof(*cd->wb_desc));
/* update the error if time out occurred */
if (!cmd_completed) {
@@ -1154,9 +1164,9 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
* @e: event info from the receive descriptor, includes any buffers
* @pending: number of events that could be left to process
*
- * This function cleans one Admin Receive Queue element and returns
- * the contents through e. It can also return how many events are
- * left to process through 'pending'.
+ * Clean one element from the receive side of a control queue. On return 'e'
+ * contains contents of the message, and 'pending' contains the number of
+ * events left to process.
*/
int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
@@ -1212,7 +1222,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
- ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
+ ice_debug_cq(hw, cq, (void *)desc, e->msg_buf, cq->rq_buf_size, true);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message size
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 1d54b1cdb1c5..ca97b7365a1b 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -43,14 +43,13 @@ enum ice_ctl_q {
};
/* Control Queue timeout settings - max delay 1s */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT HZ /* Wait max 1s */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT USEC_PER_SEC
#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */
#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to DMA head */
struct ice_dma_mem desc_buf; /* descriptor ring memory */
- void *cmd_buf; /* command buffer memory */
union {
struct ice_dma_mem *sq_bi;
@@ -80,8 +79,6 @@ struct ice_sq_cd {
struct ice_aq_desc *wb_desc;
};
-#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i]))
-
/* rq event information */
struct ice_rq_event_info {
struct ice_aq_desc desc;
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index a94e7072b570..a7c510832824 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -187,6 +187,7 @@ void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
break;
case ICE_VSI_CHNL:
+ case ICE_VSI_SF:
vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi));
vsi->tc_cfg.numtc = 1;
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index f182179529b7..272fd823a825 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -31,7 +31,7 @@ static const struct ice_tunnel_type_scan tnls[] = {
* Verifies various attributes of the package file, including length, format
* version, and the requirement of at least one segment.
*/
-static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+static enum ice_ddp_state ice_verify_pkg(const struct ice_pkg_hdr *pkg, u32 len)
{
u32 seg_count;
u32 i;
@@ -57,13 +57,13 @@ static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
/* all segments must fit within length */
for (i = 0; i < seg_count; i++) {
u32 off = le32_to_cpu(pkg->seg_offset[i]);
- struct ice_generic_seg_hdr *seg;
+ const struct ice_generic_seg_hdr *seg;
/* segment header must fit */
if (len < off + sizeof(*seg))
return ICE_DDP_PKG_INVALID_FILE;
- seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
+ seg = (void *)pkg + off;
/* segment body must fit */
if (len < off + le32_to_cpu(seg->seg_size))
@@ -119,13 +119,13 @@ static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
*
* This helper function validates a buffer's header.
*/
-static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
+static const struct ice_buf_hdr *ice_pkg_val_buf(const struct ice_buf *buf)
{
- struct ice_buf_hdr *hdr;
+ const struct ice_buf_hdr *hdr;
u16 section_count;
u16 data_end;
- hdr = (struct ice_buf_hdr *)buf->buf;
+ hdr = (const struct ice_buf_hdr *)buf->buf;
/* verify data */
section_count = le16_to_cpu(hdr->section_count);
if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
@@ -165,8 +165,8 @@ static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
* unexpected value has been detected (for example an invalid section count or
* an invalid buffer end value).
*/
-static struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg,
- struct ice_pkg_enum *state)
+static const struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg,
+ struct ice_pkg_enum *state)
{
if (ice_seg) {
state->buf_table = ice_find_buf_table(ice_seg);
@@ -289,11 +289,11 @@ void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
* indicates a base offset of 10, and the index for the entry is 2, then
* section handler function should set the offset to 10 + 2 = 12.
*/
-static void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
- struct ice_pkg_enum *state, u32 sect_type,
- u32 *offset,
- void *(*handler)(u32 sect_type, void *section,
- u32 index, u32 *offset))
+void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
+ struct ice_pkg_enum *state, u32 sect_type,
+ u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset))
{
void *entry;
@@ -1800,9 +1800,9 @@ int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
* success it returns a pointer to the segment header, otherwise it will
* return NULL.
*/
-static struct ice_generic_seg_hdr *
+static const struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
- struct ice_pkg_hdr *pkg_hdr)
+ const struct ice_pkg_hdr *pkg_hdr)
{
u32 i;
@@ -1813,11 +1813,9 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
/* Search all package segments for the requested segment type */
for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
- struct ice_generic_seg_hdr *seg;
+ const struct ice_generic_seg_hdr *seg;
- seg = (struct ice_generic_seg_hdr
- *)((u8 *)pkg_hdr +
- le32_to_cpu(pkg_hdr->seg_offset[i]));
+ seg = (void *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]);
if (le32_to_cpu(seg->seg_type) == seg_type)
return seg;
@@ -2354,12 +2352,12 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
*
* Return: zero when update was successful, negative values otherwise.
*/
-int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
+int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
{
- u8 *current_topo, *new_topo = NULL;
- struct ice_run_time_cfg_seg *seg;
- struct ice_buf_hdr *section;
- struct ice_pkg_hdr *pkg_hdr;
+ u8 *new_topo = NULL, *topo __free(kfree) = NULL;
+ const struct ice_run_time_cfg_seg *seg;
+ const struct ice_buf_hdr *section;
+ const struct ice_pkg_hdr *pkg_hdr;
enum ice_ddp_state state;
u16 offset, size = 0;
u32 reg = 0;
@@ -2375,15 +2373,13 @@ int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
return -EOPNOTSUPP;
}
- current_topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
- if (!current_topo)
+ topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!topo)
return -ENOMEM;
- /* Get the current Tx topology */
- status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL,
- &flags, false);
-
- kfree(current_topo);
+ /* Get the current Tx topology flags */
+ status = ice_get_set_tx_topo(hw, topo, ICE_AQ_MAX_BUF_LEN, NULL, &flags,
+ false);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
@@ -2419,7 +2415,7 @@ int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
goto update_topo;
}
- pkg_hdr = (struct ice_pkg_hdr *)buf;
+ pkg_hdr = (const struct ice_pkg_hdr *)buf;
state = ice_verify_pkg(pkg_hdr, len);
if (state) {
ice_debug(hw, ICE_DBG_INIT, "Failed to verify pkg (err: %d)\n",
@@ -2428,7 +2424,7 @@ int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
}
/* Find runtime configuration segment */
- seg = (struct ice_run_time_cfg_seg *)
+ seg = (const struct ice_run_time_cfg_seg *)
ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
if (!seg) {
ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
@@ -2461,8 +2457,10 @@ int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
return -EIO;
}
- /* Get the new topology buffer */
- new_topo = ((u8 *)section) + offset;
+ /* Get the new topology buffer, reuse current topo copy mem */
+ static_assert(ICE_PKG_BUF_SIZE == ICE_AQ_MAX_BUF_LEN);
+ new_topo = topo;
+ memcpy(new_topo, (u8 *)section + offset, size);
update_topo:
/* Acquire global lock to make sure that set topology issued
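The rewritten ice_cfg_tx_topo() relies on scope-based cleanup: memory tagged with __free(kfree) is released automatically when the variable goes out of scope. A minimal sketch of the idiom; the function name and buffer size are illustrative:

    #include <linux/cleanup.h>
    #include <linux/slab.h>

    static int cleanup_demo(size_t len)
    {
            u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;

            /* Use buf on any path; no explicit kfree() is needed. */
            return 0;
    }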
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h
index 622543f08b43..79551da2a4b0 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -261,10 +261,17 @@ struct ice_meta_sect {
#define ICE_SID_CDID_KEY_BUILDER_RSS 47
#define ICE_SID_CDID_REDIR_RSS 48
+#define ICE_SID_RXPARSER_CAM 50
+#define ICE_SID_RXPARSER_NOMATCH_CAM 51
+#define ICE_SID_RXPARSER_IMEM 52
#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_RXPARSER_PROTO_GRP 57
#define ICE_SID_RXPARSER_METADATA_INIT 58
#define ICE_SID_TXPARSER_BOOST_TCAM 66
+#define ICE_SID_RXPARSER_MARKER_GRP 72
+#define ICE_SID_RXPARSER_PG_SPILL 76
+#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
@@ -276,6 +283,7 @@ struct ice_meta_sect {
#define ICE_SID_CDID_KEY_BUILDER_PE 87
#define ICE_SID_CDID_REDIR_PE 88
+#define ICE_SID_RXPARSER_FLAG_REDIR 97
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
@@ -430,7 +438,7 @@ struct ice_pkg_enum {
u32 buf_idx;
u32 type;
- struct ice_buf_hdr *buf;
+ const struct ice_buf_hdr *buf;
u32 sect_idx;
void *sect;
u32 sect_type;
@@ -451,9 +459,14 @@ int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
+void *
+ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type, u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset));
void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type);
-int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len);
+int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index e92be6f130a3..74c0e7319a4c 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -9,6 +9,7 @@
#define ICE_CGU_STATE_ACQ_ERR_THRESHOLD 50
#define ICE_DPLL_PIN_IDX_INVALID 0xff
#define ICE_DPLL_RCLK_NUM_PER_PF 1
+#define ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT 25
/**
* enum ice_dpll_pin_type - enumerate ice pin types:
@@ -30,6 +31,10 @@ static const char * const pin_type_name[] = {
[ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input",
};
+static const struct dpll_pin_frequency ice_esync_range[] = {
+ DPLL_PIN_FREQUENCY_RANGE(0, DPLL_PIN_FREQUENCY_1_HZ),
+};
+
/**
* ice_dpll_is_reset - check if reset is in progress
* @pf: private board structure
@@ -394,8 +399,8 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
switch (pin_type) {
case ICE_DPLL_PIN_TYPE_INPUT:
- ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL,
- NULL, &pin->flags[0],
+ ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, &pin->status,
+ NULL, NULL, &pin->flags[0],
&pin->freq, &pin->phase_adjust);
if (ret)
goto err;
@@ -430,7 +435,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
goto err;
parent &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL;
- if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) {
+ if (ICE_AQC_GET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) {
pin->state[pf->dplls.eec.dpll_idx] =
parent == pf->dplls.eec.dpll_idx ?
DPLL_PIN_STATE_CONNECTED :
@@ -651,6 +656,8 @@ ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_dpll_pin *p = pin_priv;
struct ice_dpll *d = dpll_priv;
+ if (state == DPLL_PIN_STATE_SELECTABLE)
+ return -EINVAL;
if (!enable && p->state[d->dpll_idx] == DPLL_PIN_STATE_DISCONNECTED)
return 0;
@@ -1099,6 +1106,214 @@ ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_output_esync_set - callback for setting embedded sync
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @freq: requested embedded sync frequency
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting embedded sync frequency value
+ * on an output pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_output_esync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 freq, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ u8 flags = 0;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_OUT_EN)
+ flags = ICE_AQC_SET_CGU_OUT_CFG_OUT_EN;
+ if (freq == DPLL_PIN_FREQUENCY_1_HZ) {
+ if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) {
+ ret = 0;
+ } else {
+ flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
+ ret = ice_aq_set_output_pin_cfg(&pf->hw, p->idx, flags,
+ 0, 0, 0);
+ }
+ } else {
+ if (!(p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)) {
+ ret = 0;
+ } else {
+ flags &= ~ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
+ ret = ice_aq_set_output_pin_cfg(&pf->hw, p->idx, flags,
+ 0, 0, 0);
+ }
+ }
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_output_esync_get - callback for getting embedded sync config
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @esync: on success holds embedded sync pin properties
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting embedded sync frequency value
+ * and capabilities on an output pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_output_esync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (!(p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_ABILITY) ||
+ p->freq != DPLL_PIN_FREQUENCY_10_MHZ) {
+ mutex_unlock(&pf->dplls.lock);
+ return -EOPNOTSUPP;
+ }
+ esync->range = ice_esync_range;
+ esync->range_num = ARRAY_SIZE(ice_esync_range);
+ if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) {
+ esync->freq = DPLL_PIN_FREQUENCY_1_HZ;
+ esync->pulse = ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT;
+ } else {
+ esync->freq = 0;
+ esync->pulse = 0;
+ }
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_input_esync_set - callback for setting embedded sync
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @freq: requested embedded sync frequency
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting embedded sync frequency value
+ * on an input pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_esync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 freq, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ u8 flags_en = 0;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN)
+ flags_en = ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN;
+ if (freq == DPLL_PIN_FREQUENCY_1_HZ) {
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) {
+ ret = 0;
+ } else {
+ flags_en |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN;
+ ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, 0,
+ flags_en, 0, 0);
+ }
+ } else {
+ if (!(p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN)) {
+ ret = 0;
+ } else {
+ flags_en &= ~ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN;
+ ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, 0,
+ flags_en, 0, 0);
+ }
+ }
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_input_esync_get - callback for getting embedded sync config
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @esync: on success holds embedded sync pin properties
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting embedded sync frequency value
+ * and capabilities on an input pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_esync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (!(p->status & ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_CAP) ||
+ p->freq != DPLL_PIN_FREQUENCY_10_MHZ) {
+ mutex_unlock(&pf->dplls.lock);
+ return -EOPNOTSUPP;
+ }
+ esync->range = ice_esync_range;
+ esync->range_num = ARRAY_SIZE(ice_esync_range);
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) {
+ esync->freq = DPLL_PIN_FREQUENCY_1_HZ;
+ esync->pulse = ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT;
+ } else {
+ esync->freq = 0;
+ esync->pulse = 0;
+ }
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
* ice_dpll_rclk_state_on_pin_set - set a state on rclk pin
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -1222,6 +1437,8 @@ static const struct dpll_pin_ops ice_dpll_input_ops = {
.phase_adjust_get = ice_dpll_pin_phase_adjust_get,
.phase_adjust_set = ice_dpll_input_phase_adjust_set,
.phase_offset_get = ice_dpll_phase_offset_get,
+ .esync_set = ice_dpll_input_esync_set,
+ .esync_get = ice_dpll_input_esync_get,
};
static const struct dpll_pin_ops ice_dpll_output_ops = {
@@ -1232,6 +1449,8 @@ static const struct dpll_pin_ops ice_dpll_output_ops = {
.direction_get = ice_dpll_output_direction,
.phase_adjust_get = ice_dpll_pin_phase_adjust_get,
.phase_adjust_set = ice_dpll_output_phase_adjust_set,
+ .esync_set = ice_dpll_output_esync_set,
+ .esync_get = ice_dpll_output_esync_get,
};
static const struct dpll_device_ops ice_dpll_ops = {
@@ -1626,6 +1845,8 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
struct dpll_pin *parent;
int ret, i;
+ if (WARN_ON((!vsi || !vsi->netdev)))
+ return -EINVAL;
ret = ice_dpll_get_pins(pf, pin, start_idx, ICE_DPLL_RCLK_NUM_PER_PF,
pf->dplls.clock_id);
if (ret)
@@ -1641,8 +1862,6 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
if (ret)
goto unregister_pins;
}
- if (WARN_ON((!vsi || !vsi->netdev)))
- return -EINVAL;
dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h
index 93172e93995b..c320f1bf7d6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.h
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.h
@@ -31,6 +31,7 @@ struct ice_dpll_pin {
struct dpll_pin_properties prop;
u32 freq;
s32 phase_adjust;
+ u8 status;
};
/** ice_dpll - store info required for DPLL control
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 3cfa071e3718..c0b3e70a7ea3 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -452,11 +452,9 @@ static void ice_eswitch_start_reprs(struct ice_pf *pf)
ice_eswitch_start_all_tx_queues(pf);
}
-int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
+static int
+ice_eswitch_attach(struct ice_pf *pf, struct ice_repr *repr, unsigned long *id)
{
- struct devlink *devlink = priv_to_devlink(pf);
- struct ice_repr *repr;
int err;
if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
@@ -470,13 +468,9 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
ice_eswitch_stop_reprs(pf);
- devl_lock(devlink);
- repr = ice_repr_add_vf(vf);
- devl_unlock(devlink);
- if (IS_ERR(repr)) {
- err = PTR_ERR(repr);
+ err = repr->ops.add(repr);
+ if (err)
goto err_create_repr;
- }
err = ice_eswitch_setup_repr(pf, repr);
if (err)
@@ -486,7 +480,7 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
if (err)
goto err_xa_alloc;
- vf->repr_id = repr->id;
+ *id = repr->id;
ice_eswitch_start_reprs(pf);
@@ -495,9 +489,7 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
err_xa_alloc:
ice_eswitch_release_repr(pf, repr);
err_setup_repr:
- devl_lock(devlink);
- ice_repr_rem_vf(repr);
- devl_unlock(devlink);
+ repr->ops.rem(repr);
err_create_repr:
if (xa_empty(&pf->eswitch.reprs))
ice_eswitch_disable_switchdev(pf);
@@ -506,14 +498,59 @@ err_create_repr:
return err;
}
-void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
+/**
+ * ice_eswitch_attach_vf - attach VF to the eswitch
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF structure to be attached
+ *
+ * A port representor for the VF is created during attach.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
{
- struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
+ struct ice_repr *repr = ice_repr_create_vf(vf);
struct devlink *devlink = priv_to_devlink(pf);
+ int err;
- if (!repr)
- return;
+ if (IS_ERR(repr))
+ return PTR_ERR(repr);
+
+ devl_lock(devlink);
+ err = ice_eswitch_attach(pf, repr, &vf->repr_id);
+ if (err)
+ ice_repr_destroy(repr);
+ devl_unlock(devlink);
+
+ return err;
+}
+
+/**
+ * ice_eswitch_attach_sf - attach SF to the eswitch
+ * @pf: pointer to PF structure
+ * @sf: pointer to SF structure to be attached
+ *
+ * A port representor for the SF is created during attach.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
+{
+ struct ice_repr *repr = ice_repr_create_sf(sf);
+ int err;
+ if (IS_ERR(repr))
+ return PTR_ERR(repr);
+
+ err = ice_eswitch_attach(pf, repr, &sf->repr_id);
+ if (err)
+ ice_repr_destroy(repr);
+
+ return err;
+}
+
+static void ice_eswitch_detach(struct ice_pf *pf, struct ice_repr *repr)
+{
ice_eswitch_stop_reprs(pf);
xa_erase(&pf->eswitch.reprs, repr->id);
@@ -521,10 +558,12 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
ice_eswitch_disable_switchdev(pf);
ice_eswitch_release_repr(pf, repr);
- devl_lock(devlink);
- ice_repr_rem_vf(repr);
+ repr->ops.rem(repr);
+ ice_repr_destroy(repr);
if (xa_empty(&pf->eswitch.reprs)) {
+ struct devlink *devlink = priv_to_devlink(pf);
+
/* since all port representors are destroyed, there is
* no point in keeping the nodes
*/
@@ -533,10 +572,42 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
} else {
ice_eswitch_start_reprs(pf);
}
+}
+
+/**
+ * ice_eswitch_detach_vf - detach VF from the eswitch
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF structure to be detached
+ */
+void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf)
+{
+ struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
+ struct devlink *devlink = priv_to_devlink(pf);
+
+ if (!repr)
+ return;
+
+ devl_lock(devlink);
+ ice_eswitch_detach(pf, repr);
devl_unlock(devlink);
}
/**
+ * ice_eswitch_detach_sf - detach SF from the eswitch
+ * @pf: pointer to PF structure
+ * @sf: pointer to SF structure to be detached
+ */
+void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
+{
+ struct ice_repr *repr = xa_load(&pf->eswitch.reprs, sf->repr_id);
+
+ if (!repr)
+ return;
+
+ ice_eswitch_detach(pf, repr);
+}
+
+/**
* ice_eswitch_get_target - get netdev based on src_vsi from descriptor
* @rx_ring: ring used to receive the packet
* @rx_desc: descriptor used to get src_vsi value
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index 78fd39a6935d..20ce32dda69c 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -5,11 +5,13 @@
#define _ICE_ESWITCH_H_
#include <net/devlink.h>
+#include "devlink/devlink_port.h"
#ifdef CONFIG_ICE_SWITCHDEV
-void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf);
-int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf);
+void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf);
+void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf);
+int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf);
+int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf);
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int
@@ -31,10 +33,20 @@ struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac);
void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac);
#else /* CONFIG_ICE_SWITCHDEV */
-static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
+static inline void
+ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf) { }
+
+static inline void
+ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) { }
+
+static inline int
+ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
+{
+ return -EOPNOTSUPP;
+}
static inline int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
+ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
index f5aceb32bf4d..cccb7ddf61c9 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
@@ -582,10 +582,13 @@ ice_eswitch_br_switchdev_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
+void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
{
struct ice_esw_br_fdb_entry *entry, *tmp;
+ if (!bridge)
+ return;
+
list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
index c15c7344d7f8..66a2c804338f 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
@@ -117,5 +117,6 @@ void
ice_eswitch_br_offloads_deinit(struct ice_pf *pf);
int
ice_eswitch_br_offloads_init(struct ice_pf *pf);
+void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge);
#endif /* _ICE_ESWITCH_BR_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index bc79ba974e49..d5cc934d1359 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3792,8 +3792,6 @@ ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
return ethtool_op_get_ts_info(dev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -4414,7 +4412,7 @@ ice_repr_get_drvinfo(struct net_device *netdev,
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
- if (ice_check_vf_ready_for_cfg(repr->vf))
+ if (repr->ops.ready(repr))
return;
__ice_get_drvinfo(netdev, drvinfo, repr->src_vsi);
@@ -4426,8 +4424,7 @@ ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
struct ice_repr *repr = ice_netdev_to_repr(netdev);
/* for port representors only ETH_SS_STATS is supported */
- if (ice_check_vf_ready_for_cfg(repr->vf) ||
- stringset != ETH_SS_STATS)
+ if (repr->ops.ready(repr) || stringset != ETH_SS_STATS)
return;
__ice_get_strings(netdev, stringset, data, repr->src_vsi);
@@ -4440,7 +4437,7 @@ ice_repr_get_ethtool_stats(struct net_device *netdev,
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
- if (ice_check_vf_ready_for_cfg(repr->vf))
+ if (repr->ops.ready(repr))
return;
__ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi);
@@ -4725,6 +4722,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH,
.cap_rss_sym_xor_supported = true,
+ .rxfh_per_ctx_key = true,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
.get_fec_stats = ice_get_fec_stats,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 20d5db88c99f..ed95072ca6e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -2981,6 +2981,50 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
}
/**
+ * ice_disable_fd_swap - set registers appropriately to disable FD SWAP
+ * @hw: pointer to the HW struct
+ * @prof_id: profile ID
+ */
+static void
+ice_disable_fd_swap(struct ice_hw *hw, u8 prof_id)
+{
+ u16 swap_val, fvw_num;
+ unsigned int i;
+
+ swap_val = ICE_SWAP_VALID;
+ fvw_num = hw->blk[ICE_BLK_FD].es.fvw / ICE_FDIR_REG_SET_SIZE;
+
+	/* Since the SWAP flag in the programming descriptor doesn't
+	 * work, disable the SWAP option by programming the relevant
+	 * SWAP and INSET register sets instead.
+ */
+	for (i = 0; i < fvw_num; i++) {
+ u32 raw_swap, raw_in;
+ unsigned int j;
+
+ raw_swap = 0;
+ raw_in = 0;
+
+ for (j = 0; j < ICE_FDIR_REG_SET_SIZE; j++) {
+ raw_swap |= (swap_val++) << (j * BITS_PER_BYTE);
+ raw_in |= ICE_INSET_DFLT << (j * BITS_PER_BYTE);
+ }
+
+ /* write the FDIR swap register set */
+ wr32(hw, GLQF_FDSWAP(prof_id, i), raw_swap);
+
+ ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): 0x%x = 0x%08x\n",
+ prof_id, i, GLQF_FDSWAP(prof_id, i), raw_swap);
+
+ /* write the FDIR inset register set */
+ wr32(hw, GLQF_FDINSET(prof_id, i), raw_in);
+
+ ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): 0x%x = 0x%08x\n",
+ prof_id, i, GLQF_FDINSET(prof_id, i), raw_in);
+ }
+}
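For readers tracing the register writes, here is a worked example of the pattern the loop above produces; the numeric value of ICE_SWAP_VALID is an assumption (it is not visible in this hunk), while ICE_FDIR_REG_SET_SIZE is 4 per the ice_flex_pipe.h hunk further down:

/*
 * Worked example, assuming ICE_SWAP_VALID == 0x80 (not shown in this
 * hunk) and ICE_FDIR_REG_SET_SIZE == 4. Each iteration packs four
 * consecutive swap values, one per byte:
 *
 *   i == 0: GLQF_FDSWAP(prof_id, 0) = 0x83828180
 *   i == 1: GLQF_FDSWAP(prof_id, 1) = 0x87868584
 *   ...
 *
 * i.e. every field-vector word maps to itself, so no src/dst pair is
 * ever swapped; GLQF_FDINSET is filled with ICE_INSET_DFLT bytes the
 * same way.
 */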
+
+/**
* ice_add_prof - add profile
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -2991,6 +3035,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
* @es: extraction sequence (length of array is determined by the block)
* @masks: mask for extraction sequence
* @symm: symmetric setting for RSS profiles
+ * @fd_swap: enable/disable FDIR paired src/dst fields swap option
*
* This function registers a profile, which matches a set of PTYPES with a
* particular extraction sequence. While the hardware profile is allocated
@@ -3000,7 +3045,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool symm)
+ struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap)
{
u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
@@ -3020,7 +3065,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
status = ice_alloc_prof_id(hw, blk, &prof_id);
if (status)
goto err_ice_add_prof;
- if (blk == ICE_BLK_FD) {
+ if (blk == ICE_BLK_FD && fd_swap) {
/* For Flow Director block, the extraction sequence may
* need to be altered in the case where there are paired
* fields that have no match. This is necessary because
@@ -3031,6 +3076,8 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
status = ice_update_fd_swap(hw, prof_id, es);
if (status)
goto err_ice_add_prof;
+ } else if (blk == ICE_BLK_FD) {
+ ice_disable_fd_swap(hw, prof_id);
}
status = ice_update_prof_masking(hw, blk, prof_id, masks);
if (status)
@@ -4099,6 +4146,54 @@ err_ice_add_prof_id_flow:
}
/**
+ * ice_flow_assoc_fdir_prof - add an FDIR profile for main/ctrl VSI
+ * @hw: pointer to the HW struct
+ * @blk: HW block
+ * @dest_vsi: dest VSI
+ * @fdir_vsi: fdir programming VSI
+ * @hdl: profile handle
+ *
+ * Update the hardware tables to enable the FDIR profile indicated by @hdl for
+ * the VSI specified by @dest_vsi. On success, the flow will be enabled.
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+int
+ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
+ u16 dest_vsi, u16 fdir_vsi, u64 hdl)
+{
+ u16 vsi_num;
+ int status;
+
+ if (blk != ICE_BLK_FD)
+ return -EINVAL;
+
+ vsi_num = ice_get_hw_vsi_num(hw, dest_vsi);
+ status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for main VSI flow entry: %d\n",
+ status);
+ return status;
+ }
+
+ vsi_num = ice_get_hw_vsi_num(hw, fdir_vsi);
+ status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for ctrl VSI flow entry: %d\n",
+ status);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ vsi_num = ice_get_hw_vsi_num(hw, dest_vsi);
+ ice_rem_prof_id_flow(hw, blk, vsi_num, hdl);
+
+ return status;
+}
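A minimal caller sketch for the new helper; the VSI indices and profile handle below are hypothetical, and the rollback on failure is already handled inside the function:

static int example_enable_fdir_prof(struct ice_hw *hw)
{
	u16 main_vsi_idx = 0;	/* hypothetical dest VSI index */
	u16 ctrl_vsi_idx = 1;	/* hypothetical FDIR programming VSI */
	u64 prof_hdl = 1;	/* hypothetical profile handle */

	/* Associates the profile with both VSIs; if the ctrl VSI
	 * association fails, the helper removes the dest VSI
	 * association before returning the error.
	 */
	return ice_flow_assoc_fdir_prof(hw, ICE_BLK_FD, main_vsi_idx,
					ctrl_vsi_idx, prof_hdl);
}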
+
+/**
* ice_rem_prof_from_list - remove a profile from list
* @hw: pointer to the HW struct
* @lst: list to remove the profile from
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index b39d7cdc381f..90b9b0993122 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -6,6 +6,8 @@
#include "ice_type.h"
+#define ICE_FDIR_REG_SET_SIZE 4
+
int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
@@ -42,13 +44,16 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool symm);
+ struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
+int
+ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
+ u16 dest_vsi, u16 fdir_vsi, u64 hdl);
enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_ddp_state
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index fc2b58f56279..d97b751052f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -409,6 +409,29 @@ static const u32 ice_ptypes_gtpc_tid[] = {
};
/* Packet types for GTPU */
+static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
+ { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
+};
+
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
{ ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
@@ -1400,7 +1423,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
/* Add a HW profile for this flow profile */
status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
params->attr, params->attr_cnt, params->es,
- params->mask, symm);
+ params->mask, symm, true);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
@@ -1523,6 +1546,90 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
return status;
}
+#define FLAG_GTP_EH_PDU_LINK BIT_ULL(13)
+#define FLAG_GTP_EH_PDU BIT_ULL(14)
+
+#define HI_BYTE_IN_WORD GENMASK(15, 8)
+#define LO_BYTE_IN_WORD GENMASK(7, 0)
+
+#define FLAG_GTPU_MSK \
+ (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
+#define FLAG_GTPU_UP \
+ (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
+#define FLAG_GTPU_DW FLAG_GTP_EH_PDU
+
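Spelling out the flag combinations: FLAG_GTPU_UP and FLAG_GTPU_MSK both cover bits 13 and 14, while FLAG_GTPU_DW is bit 14 alone, so the switch in ice_flow_set_parser_prof() below resolves as sketched here:

/*
 * Flag decoding for the switch in ice_flow_set_parser_prof():
 *
 *   prof->flags == PDU | PDU_LINK (bits 14+13) -> uplink attributes
 *   prof->flags == PDU            (bit 14)     -> downlink attributes
 *   anything else whose flags_msk overlaps
 *   bits 13/14                                 -> session attributes
 */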
+/**
+ * ice_flow_set_parser_prof - Set flow profile based on the parsed profile info
+ * @hw: pointer to the HW struct
+ * @dest_vsi: dest VSI
+ * @fdir_vsi: fdir programming VSI
+ * @prof: stores parsed profile info from raw flow
+ * @blk: classification block
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+int
+ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
+ struct ice_parser_profile *prof, enum ice_block blk)
+{
+ u64 id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);
+ struct ice_flow_prof_params *params __free(kfree);
+ u8 fv_words = hw->blk[blk].es.fvw;
+ int status;
+ int i, idx;
+
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
+ params->es[i].prot_id = ICE_PROT_INVALID;
+ params->es[i].off = ICE_FV_OFFSET_INVAL;
+ }
+
+ for (i = 0; i < prof->fv_num; i++) {
+ if (hw->blk[blk].es.reverse)
+ idx = fv_words - i - 1;
+ else
+ idx = i;
+ params->es[idx].prot_id = prof->fv[i].proto_id;
+ params->es[idx].off = prof->fv[i].offset;
+ params->mask[idx] = (((prof->fv[i].msk) << BITS_PER_BYTE) &
+ HI_BYTE_IN_WORD) |
+ (((prof->fv[i].msk) >> BITS_PER_BYTE) &
+ LO_BYTE_IN_WORD);
+ }
+
+ switch (prof->flags) {
+ case FLAG_GTPU_DW:
+ params->attr = ice_attr_gtpu_down;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
+ break;
+ case FLAG_GTPU_UP:
+ params->attr = ice_attr_gtpu_up;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
+ break;
+ default:
+ if (prof->flags_msk & FLAG_GTPU_MSK) {
+ params->attr = ice_attr_gtpu_session;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
+ }
+ break;
+ }
+
+ status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes,
+ params->attr, params->attr_cnt,
+ params->es, params->mask, false, false);
+ if (status)
+ return status;
+
+ status = ice_flow_assoc_fdir_prof(hw, blk, dest_vsi, fdir_vsi, id);
+ if (status)
+ ice_rem_prof(hw, blk, id);
+
+ return status;
+}
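One detail worth a worked example is the mask assignment above, which swaps the two bytes of each 16-bit parser mask (input value hypothetical):

/*
 * Byte-swap of the parser mask, with a hypothetical input:
 *
 *   prof->fv[i].msk = 0x12ff
 *   (0x12ff << 8) & HI_BYTE_IN_WORD = 0xff00
 *   (0x12ff >> 8) & LO_BYTE_IN_WORD = 0x0012
 *   params->mask[idx]               = 0xff12
 *
 * presumably because the parser reports masks in the opposite byte
 * order from what the extraction-sequence mask registers expect.
 */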
+
/**
* ice_flow_add_prof - Add a flow profile for packet segments and matched fields
* @hw: pointer to the HW struct
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 2fd2e0cb483d..6cb7bb879c98 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -5,6 +5,7 @@
#define _ICE_FLOW_H_
#include "ice_flex_type.h"
+#include "ice_parser.h"
#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
#define ICE_FLOW_FLD_OFF_INVAL 0xffff
@@ -326,6 +327,7 @@ enum ice_rss_cfg_hdr_type {
ICE_RSS_ANY_HEADERS
};
+struct ice_vsi;
struct ice_rss_hash_cfg {
u32 addl_hdrs; /* protocol header fields */
u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */
@@ -445,6 +447,9 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
bool symm, struct ice_flow_prof **prof);
int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
int
+ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
+ struct ice_parser_profile *prof, enum ice_block blk);
+int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u64 entry_id, u16 vsi, enum ice_flow_priority prio,
void *data, u64 *entry_h);
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index f81db6c107c8..2702a0da5c3e 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2019, Intel Corporation. */
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/uuid.h>
#include <linux/crc32.h>
#include <linux/pldmfw.h>
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index f559e60992fa..06e712cdc3d9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -7,6 +7,7 @@
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
+#include "ice_type.h"
#include "ice_vsi_vlan_ops.h"
/**
@@ -20,6 +21,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
return "ICE_VSI_PF";
case ICE_VSI_VF:
return "ICE_VSI_VF";
+ case ICE_VSI_SF:
+ return "ICE_VSI_SF";
case ICE_VSI_CTRL:
return "ICE_VSI_CTRL";
case ICE_VSI_CHNL:
@@ -135,6 +138,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
+ case ICE_VSI_SF:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
/* a user could change the values of num_[tr]x_desc using
@@ -201,6 +205,12 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
max_t(int, vsi->alloc_rxq,
vsi->alloc_txq));
break;
+ case ICE_VSI_SF:
+ vsi->alloc_txq = 1;
+ vsi->alloc_rxq = 1;
+ vsi->num_q_vectors = 1;
+ vsi->irq_dyn_alloc = true;
+ break;
case ICE_VSI_VF:
if (vf->num_req_qs)
vf->num_vf_qs = vf->num_req_qs;
@@ -423,7 +433,7 @@ err_out:
* This deallocates the VSI's queue resources, removes it from the PF's
* VSI array if necessary, and deallocates the VSI
*/
-static void ice_vsi_free(struct ice_vsi *vsi)
+void ice_vsi_free(struct ice_vsi *vsi)
{
struct ice_pf *pf = NULL;
struct device *dev;
@@ -447,6 +457,7 @@ static void ice_vsi_free(struct ice_vsi *vsi)
ice_vsi_free_stats(vsi);
ice_vsi_free_arrays(vsi);
+ mutex_destroy(&vsi->xdp_state_lock);
mutex_unlock(&pf->sw_mutex);
devm_kfree(dev, vsi);
}
@@ -558,6 +569,7 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
switch (vsi->type) {
case ICE_VSI_PF:
+ case ICE_VSI_SF:
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
@@ -594,7 +606,7 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
*
* returns a pointer to a VSI on success, NULL on failure.
*/
-static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
+struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
@@ -626,6 +638,8 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
pf->next_vsi);
+ mutex_init(&vsi->xdp_state_lock);
+
unlock_pf:
mutex_unlock(&pf->sw_mutex);
return vsi;
@@ -886,6 +900,11 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
max_rss_size);
vsi->rss_lut_type = ICE_LUT_PF;
break;
+ case ICE_VSI_SF:
+ vsi->rss_table_size = ICE_LUT_VSI_SIZE;
+ vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
+ vsi->rss_lut_type = ICE_LUT_VSI;
+ break;
case ICE_VSI_VF:
/* VF VSI will get a small RSS table.
* For VSI_LUT, LUT size should be set to 64 bytes.
@@ -1133,6 +1152,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
break;
case ICE_VSI_VF:
+ case ICE_VSI_SF:
/* VF VSI will get a small RSS table which is a VSI LUT type */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
break;
@@ -1211,6 +1231,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
+ case ICE_VSI_SF:
case ICE_VSI_CHNL:
ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
break;
@@ -2092,6 +2113,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
case ICE_VSI_CHNL:
case ICE_VSI_LB:
case ICE_VSI_PF:
+ case ICE_VSI_SF:
max_agg_nodes = ICE_MAX_PF_AGG_NODES;
agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
agg_node_iter = &pf->pf_agg_node[0];
@@ -2261,6 +2283,7 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_CTRL:
+ case ICE_VSI_SF:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2286,9 +2309,6 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
ice_vsi_map_rings_to_vectors(vsi);
- /* Associate q_vector rings to napi */
- ice_vsi_set_napi_queues(vsi);
-
vsi->stat_offsets_loaded = false;
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
@@ -2413,20 +2433,13 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
int err;
- /* The Rx rule will only exist to remove if the LLDP FW
- * engine is currently stopped
- */
- if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
- !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, false);
-
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
if (err)
dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
vsi->vsi_num, err);
- if (ice_is_xdp_ena_vsi(vsi))
+ if (vsi->xdp_rings)
/* return value check can be skipped here, it always returns
* 0 if reset is in progress
*/
@@ -2528,7 +2541,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
for (q = 0; q < q_vector->num_ring_tx; q++) {
ice_write_itr(&q_vector->tx, 0);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
- if (ice_is_xdp_ena_vsi(vsi)) {
+ if (vsi->xdp_rings) {
u32 xdp_txq = txq + vsi->num_xdp_txq;
wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
@@ -2628,6 +2641,7 @@ void ice_vsi_close(struct ice_vsi *vsi)
if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
ice_down(vsi);
+ ice_vsi_clear_napi_queues(vsi);
ice_vsi_free_irq(vsi);
ice_vsi_free_tx_rings(vsi);
ice_vsi_free_rx_rings(vsi);
@@ -2647,7 +2661,8 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
- if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
+ vsi->type == ICE_VSI_SF)) {
if (netif_running(vsi->netdev)) {
if (!locked)
rtnl_lock();
@@ -2671,143 +2686,99 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
*/
void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
- if (test_bit(ICE_VSI_DOWN, vsi->state))
- return;
+ bool already_down = test_bit(ICE_VSI_DOWN, vsi->state);
set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
- if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+ if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
+ vsi->type == ICE_VSI_SF)) {
if (netif_running(vsi->netdev)) {
if (!locked)
rtnl_lock();
-
- ice_vsi_close(vsi);
+ already_down = test_bit(ICE_VSI_DOWN, vsi->state);
+ if (!already_down)
+ ice_vsi_close(vsi);
if (!locked)
rtnl_unlock();
- } else {
+ } else if (!already_down) {
ice_vsi_close(vsi);
}
- } else if (vsi->type == ICE_VSI_CTRL) {
+ } else if (vsi->type == ICE_VSI_CTRL && !already_down) {
ice_vsi_close(vsi);
}
}
/**
- * __ice_queue_set_napi - Set the napi instance for the queue
- * @dev: device to which NAPI and queue belong
- * @queue_index: Index of queue
- * @type: queue type as RX or TX
- * @napi: NAPI context
- * @locked: is the rtnl_lock already held
- *
- * Set the napi instance for the queue. Caller indicates the lock status.
- */
-static void
-__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
- enum netdev_queue_type type, struct napi_struct *napi,
- bool locked)
-{
- if (!locked)
- rtnl_lock();
- netif_queue_set_napi(dev, queue_index, type, napi);
- if (!locked)
- rtnl_unlock();
-}
-
-/**
- * ice_queue_set_napi - Set the napi instance for the queue
- * @vsi: VSI being configured
- * @queue_index: Index of queue
- * @type: queue type as RX or TX
- * @napi: NAPI context
+ * ice_vsi_set_napi_queues - associate netdev queues with napi
+ * @vsi: VSI pointer
*
- * Set the napi instance for the queue. The rtnl lock state is derived from the
- * execution path.
+ * Associate queue[s] with napi for all vectors.
+ * The caller must hold rtnl_lock.
*/
-void
-ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
- enum netdev_queue_type type, struct napi_struct *napi)
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
{
- struct ice_pf *pf = vsi->back;
+ struct net_device *netdev = vsi->netdev;
+ int q_idx, v_idx;
- if (!vsi->netdev)
+ if (!netdev)
return;
- if (current_work() == &pf->serv_task ||
- test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
- test_bit(ICE_DOWN, pf->state) ||
- test_bit(ICE_SUSPENDED, pf->state))
- __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
- false);
- else
- __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
- true);
-}
+ ice_for_each_rxq(vsi, q_idx)
+ netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX,
+ &vsi->rx_rings[q_idx]->q_vector->napi);
-/**
- * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
- * @q_vector: q_vector pointer
- * @locked: is the rtnl_lock already held
- *
- * Associate the q_vector napi with all the queue[s] on the vector.
- * Caller indicates the lock status.
- */
-void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
-{
- struct ice_rx_ring *rx_ring;
- struct ice_tx_ring *tx_ring;
-
- ice_for_each_rx_ring(rx_ring, q_vector->rx)
- __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
- NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
- locked);
-
- ice_for_each_tx_ring(tx_ring, q_vector->tx)
- __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
- NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
- locked);
+ ice_for_each_txq(vsi, q_idx)
+ netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX,
+ &vsi->tx_rings[q_idx]->q_vector->napi);
/* Also set the interrupt number for the NAPI */
- netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+
+ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+ }
}
/**
- * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
- * @q_vector: q_vector pointer
+ * ice_vsi_clear_napi_queues - dissociate netdev queues from napi
+ * @vsi: VSI pointer
*
- * Associate the q_vector napi with all the queue[s] on the vector
+ * Clear the association between all VSI queues and napi.
+ * The caller must hold rtnl_lock.
*/
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
+void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
{
- struct ice_rx_ring *rx_ring;
- struct ice_tx_ring *tx_ring;
+ struct net_device *netdev = vsi->netdev;
+ int q_idx;
- ice_for_each_rx_ring(rx_ring, q_vector->rx)
- ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
- NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
+ if (!netdev)
+ return;
- ice_for_each_tx_ring(tx_ring, q_vector->tx)
- ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
- NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
- /* Also set the interrupt number for the NAPI */
- netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+ ice_for_each_txq(vsi, q_idx)
+ netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL);
+
+ ice_for_each_rxq(vsi, q_idx)
+ netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, NULL);
}
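Both helpers assume the caller holds rtnl_lock, as the ice_main.c hunks below demonstrate; a minimal sketch of the expected call pattern:

/* Minimal sketch of the locking both helpers expect (mirrors the
 * ice_reinit_interrupt_scheme() and ice_suspend() hunks below).
 */
static void example_napi_queue_cycle(struct ice_vsi *vsi)
{
	/* associate queues with NAPI, e.g. after interrupt re-init */
	rtnl_lock();
	ice_vsi_set_napi_queues(vsi);
	rtnl_unlock();

	/* ... later, e.g. on close or suspend ... */
	rtnl_lock();
	ice_vsi_clear_napi_queues(vsi);
	rtnl_unlock();
}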
/**
- * ice_vsi_set_napi_queues
- * @vsi: VSI pointer
+ * ice_napi_add - register NAPI handler for the VSI
+ * @vsi: VSI for which NAPI handler is to be registered
*
- * Associate queue[s] with napi for all vectors
+ * This function is only called in the driver's load path. Registering the NAPI
+ * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
+ * reset/rebuild, etc.)
*/
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
+void ice_napi_add(struct ice_vsi *vsi)
{
- int i;
+ int v_idx;
if (!vsi->netdev)
return;
- ice_for_each_q_vector(vsi, i)
- ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
+ ice_for_each_q_vector(vsi, v_idx)
+ netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
+ ice_napi_poll);
}
/**
@@ -2828,6 +2799,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
ice_rss_clean(vsi);
ice_vsi_close(vsi);
+
+	/* The Rx rule to remove will only exist if the LLDP FW
+ * engine is currently stopped
+ */
+ if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
+ !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+ ice_cfg_sw_lldp(vsi, false, false);
+
ice_vsi_decfg(vsi);
/* retain SW VSI data structure since it is needed to unregister and
@@ -3039,19 +3018,23 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
+ mutex_lock(&vsi->xdp_state_lock);
+
ret = ice_vsi_realloc_stat_arrays(vsi);
if (ret)
- goto err_vsi_cfg;
+ goto unlock;
ice_vsi_decfg(vsi);
ret = ice_vsi_cfg_def(vsi);
if (ret)
- goto err_vsi_cfg;
+ goto unlock;
coalesce = kcalloc(vsi->num_q_vectors,
sizeof(struct ice_coalesce_stored), GFP_KERNEL);
- if (!coalesce)
- return -ENOMEM;
+ if (!coalesce) {
+ ret = -ENOMEM;
+ goto decfg;
+ }
prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
@@ -3059,22 +3042,23 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
if (ret) {
if (vsi_flags & ICE_VSI_FLAG_INIT) {
ret = -EIO;
- goto err_vsi_cfg_tc_lan;
+ goto free_coalesce;
}
- kfree(coalesce);
- return ice_schedule_reset(pf, ICE_RESET_PFR);
+ ret = ice_schedule_reset(pf, ICE_RESET_PFR);
+ goto free_coalesce;
}
ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
- kfree(coalesce);
-
- return 0;
+ clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state);
-err_vsi_cfg_tc_lan:
- ice_vsi_decfg(vsi);
+free_coalesce:
kfree(coalesce);
-err_vsi_cfg:
+decfg:
+ if (ret)
+ ice_vsi_decfg(vsi);
+unlock:
+ mutex_unlock(&vsi->xdp_state_lock);
return ret;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 94ce8964dda6..1a6cfc8693ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -44,15 +44,10 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
-void
-ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
- enum netdev_queue_type type, struct napi_struct *napi);
-
-void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
-
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector);
-
void ice_vsi_set_napi_queues(struct ice_vsi *vsi);
+void ice_napi_add(struct ice_vsi *vsi);
+
+void ice_vsi_clear_napi_queues(struct ice_vsi *vsi);
int ice_vsi_release(struct ice_vsi *vsi);
@@ -65,6 +60,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags);
int ice_vsi_cfg(struct ice_vsi *vsi);
+struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf);
+void ice_vsi_free(struct ice_vsi *vsi);
bool ice_is_reset_in_progress(unsigned long *state);
int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 6f97ed471fe9..b1e7727b8677 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -15,6 +15,7 @@
#include "ice_dcb_nl.h"
#include "devlink/devlink.h"
#include "devlink/devlink_port.h"
+#include "ice_sf_eth.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
* ice tracepoint functions. This must be done exactly once across the
@@ -86,7 +87,8 @@ ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
bool netif_is_ice(const struct net_device *dev)
{
- return dev && (dev->netdev_ops == &ice_netdev_ops);
+ return dev && (dev->netdev_ops == &ice_netdev_ops ||
+ dev->netdev_ops == &ice_netdev_safe_mode_ops);
}
/**
@@ -520,25 +522,6 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
}
/**
- * ice_clear_sw_switch_recipes - clear switch recipes
- * @pf: board private structure
- *
- * Mark switch recipes as not created in sw structures. There are cases where
- * rules (especially advanced rules) need to be restored, either re-read from
- * hardware or added again. For example after the reset. 'recp_created' flag
- * prevents from doing that and need to be cleared upfront.
- */
-static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
-{
- struct ice_sw_recipe *recp;
- u8 i;
-
- recp = pf->hw.switch_info->recp_list;
- for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
- recp[i].recp_created = false;
-}
-
-/**
* ice_prepare_for_reset - prep for reset
* @pf: board private structure
* @reset_type: reset type requested
@@ -574,8 +557,9 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
mutex_unlock(&pf->vfs.table_lock);
if (ice_is_eswitch_mode_switchdev(pf)) {
- if (reset_type != ICE_RESET_PFR)
- ice_clear_sw_switch_recipes(pf);
+ rtnl_lock();
+ ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge);
+ rtnl_unlock();
}
/* release ADQ specific HW and SW resources */
@@ -608,11 +592,15 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
}
}
+
+ if (vsi->netdev)
+ netif_device_detach(vsi->netdev);
skip:
/* clear SW filtering DB */
ice_clear_hw_tbls(hw);
/* disable the VSIs and their queues that are not already DOWN */
+ set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);
ice_pf_dis_all_vsi(pf, false);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
@@ -2970,6 +2958,9 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
if (avail < cpus / 2)
return -ENOMEM;
+ if (vsi->type == ICE_VSI_SF)
+ avail = vsi->alloc_txq;
+
vsi->num_xdp_txq = min_t(u16, avail, cpus);
if (vsi->num_xdp_txq < cpus)
@@ -3001,8 +2992,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
- bool if_running = netif_running(vsi->netdev);
int ret = 0, xdp_ring_err = 0;
+ bool if_running;
if (prog && !prog->aux->xdp_has_frags) {
if (frame_size > ice_max_xdp_frame_size(vsi)) {
@@ -3013,13 +3004,17 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
}
/* hot swap progs and avoid toggling link */
- if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
+ if (ice_is_xdp_ena_vsi(vsi) == !!prog ||
+ test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) {
ice_vsi_assign_bpf_prog(vsi, prog);
return 0;
}
+ if_running = netif_running(vsi->netdev) &&
+ !test_and_set_bit(ICE_VSI_DOWN, vsi->state);
+
/* need to stop netdev while setting up the program for Rx rings */
- if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
+ if (if_running) {
ret = ice_down(vsi);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
@@ -3081,25 +3076,32 @@ static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
* @dev: netdevice
* @xdp: XDP command
*/
-static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
struct ice_netdev_priv *np = netdev_priv(dev);
struct ice_vsi *vsi = np->vsi;
+ int ret;
- if (vsi->type != ICE_VSI_PF) {
- NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) {
+ NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI");
return -EINVAL;
}
+ mutex_lock(&vsi->xdp_state_lock);
+
switch (xdp->command) {
case XDP_SETUP_PROG:
- return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
+ ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
+ break;
case XDP_SETUP_XSK_POOL:
- return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
- xdp->xsk.queue_id);
+ ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
+
+ mutex_unlock(&vsi->xdp_state_lock);
+ return ret;
}
/**
@@ -3541,28 +3543,6 @@ skip_req_irq:
}
/**
- * ice_napi_add - register NAPI handler for the VSI
- * @vsi: VSI for which NAPI handler is to be registered
- *
- * This function is only called in the driver's load path. Registering the NAPI
- * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
- * reset/rebuild, etc.)
- */
-static void ice_napi_add(struct ice_vsi *vsi)
-{
- int v_idx;
-
- if (!vsi->netdev)
- return;
-
- ice_for_each_q_vector(vsi, v_idx) {
- netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
- ice_napi_poll);
- __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
- }
-}
-
-/**
* ice_set_ops - set netdev and ethtools ops for the given netdev
* @vsi: the VSI associated with the new netdev
*/
@@ -3595,7 +3575,7 @@ static void ice_set_ops(struct ice_vsi *vsi)
* ice_set_netdev_features - set features for the given netdev
* @netdev: netdev instance
*/
-static void ice_set_netdev_features(struct net_device *netdev)
+void ice_set_netdev_features(struct net_device *netdev)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
@@ -3777,8 +3757,7 @@ ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
*
* net_device_ops implementation for adding VLAN IDs
*/
-static int
-ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi_vlan_ops *vlan_ops;
@@ -3840,8 +3819,7 @@ finish:
*
* net_device_ops implementation for removing VLAN IDs
*/
-static int
-ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
+int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi_vlan_ops *vlan_ops;
@@ -4010,6 +3988,9 @@ static void ice_deinit_pf(struct ice_pf *pf)
if (pf->ptp.clock)
ptp_clock_unregister(pf->ptp.clock);
+
+ xa_destroy(&pf->dyn_ports);
+ xa_destroy(&pf->sf_nums);
}
/**
@@ -4103,6 +4084,9 @@ static int ice_init_pf(struct ice_pf *pf)
hash_init(pf->vfs.table);
ice_mbx_init_snapshot(&pf->hw);
+ xa_init(&pf->dyn_ports);
+ xa_init(&pf->sf_nums);
+
return 0;
}
@@ -4535,16 +4519,10 @@ ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
u8 num_tx_sched_layers = hw->num_tx_sched_layers;
struct ice_pf *pf = hw->back;
struct device *dev;
- u8 *buf_copy;
int err;
dev = ice_pf_to_dev(pf);
- /* ice_cfg_tx_topo buf argument is not a constant,
- * so we have to make a copy
- */
- buf_copy = kmemdup(firmware->data, firmware->size, GFP_KERNEL);
-
- err = ice_cfg_tx_topo(hw, buf_copy, firmware->size);
+ err = ice_cfg_tx_topo(hw, firmware->data, firmware->size);
if (!err) {
if (hw->num_tx_sched_layers > num_tx_sched_layers)
dev_info(dev, "Tx scheduling layers switching feature disabled\n");
@@ -4772,14 +4750,12 @@ int ice_init_dev(struct ice_pf *pf)
ice_init_feature_support(pf);
err = ice_init_ddp_config(hw, pf);
- if (err)
- return err;
/* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be
* set in pf->state, which will cause ice_is_safe_mode to return
* true
*/
- if (ice_is_safe_mode(pf)) {
+ if (err || ice_is_safe_mode(pf)) {
/* we already got function/device capabilities but these don't
* reflect what the driver needs to do in safe mode. Instead of
* adding conditional logic everywhere to ignore these
@@ -5350,7 +5326,6 @@ err_load:
ice_deinit(pf);
err_init:
ice_adapter_put(pdev);
- pci_disable_device(pdev);
return err;
}
@@ -5445,6 +5420,7 @@ static void ice_remove(struct pci_dev *pdev)
ice_remove_arfs(pf);
devl_lock(priv_to_devlink(pf));
+ ice_dealloc_all_dynamic_ports(pf);
ice_deinit_devlink(pf);
ice_unload(pf);
@@ -5457,7 +5433,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_set_wake(pf);
ice_adapter_put(pdev);
- pci_disable_device(pdev);
}
/**
@@ -5537,7 +5512,9 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
if (ret)
goto err_reinit;
ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ rtnl_lock();
ice_vsi_set_napi_queues(pf->vsi[v]);
+ rtnl_unlock();
}
ret = ice_req_irq_msix_misc(pf);
@@ -5551,8 +5528,12 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
err_reinit:
while (v--)
- if (pf->vsi[v])
+ if (pf->vsi[v]) {
+ rtnl_lock();
+ ice_vsi_clear_napi_queues(pf->vsi[v]);
+ rtnl_unlock();
ice_vsi_free_q_vectors(pf->vsi[v]);
+ }
return ret;
}
@@ -5617,6 +5598,9 @@ static int ice_suspend(struct device *dev)
ice_for_each_vsi(pf, v) {
if (!pf->vsi[v])
continue;
+ rtnl_lock();
+ ice_vsi_clear_napi_queues(pf->vsi[v]);
+ rtnl_unlock();
ice_vsi_free_q_vectors(pf->vsi[v]);
}
ice_clear_interrupt_scheme(pf);
@@ -5924,8 +5908,16 @@ static int __init ice_module_init(void)
goto err_dest_lag_wq;
}
+ status = ice_sf_driver_register();
+ if (status) {
+ pr_err("Failed to register SF driver, err %d\n", status);
+ goto err_sf_driver;
+ }
+
return 0;
+err_sf_driver:
+ pci_unregister_driver(&ice_driver);
err_dest_lag_wq:
destroy_workqueue(ice_lag_wq);
ice_debugfs_exit();
@@ -5943,6 +5935,7 @@ module_init(ice_module_init);
*/
static void __exit ice_module_exit(void)
{
+ ice_sf_driver_unregister();
pci_unregister_driver(&ice_driver);
ice_debugfs_exit();
destroy_workqueue(ice_wq);
@@ -6744,7 +6737,8 @@ static int ice_up_complete(struct ice_vsi *vsi)
if (vsi->port_info &&
(vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
- vsi->netdev && vsi->type == ICE_VSI_PF) {
+ ((vsi->netdev && (vsi->type == ICE_VSI_PF ||
+ vsi->type == ICE_VSI_SF)))) {
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
@@ -7102,7 +7096,6 @@ void ice_update_pf_stats(struct ice_pf *pf)
* @netdev: network interface device structure
* @stats: main device statistics structure
*/
-static
void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -7230,7 +7223,7 @@ int ice_down(struct ice_vsi *vsi)
if (tx_err)
netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
vsi->vsi_num, tx_err);
- if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
+ if (!tx_err && vsi->xdp_rings) {
tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
if (tx_err)
netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
@@ -7247,7 +7240,7 @@ int ice_down(struct ice_vsi *vsi)
ice_for_each_txq(vsi, i)
ice_clean_tx_ring(vsi->tx_rings[i]);
- if (ice_is_xdp_ena_vsi(vsi))
+ if (vsi->xdp_rings)
ice_for_each_xdp_txq(vsi, i)
ice_clean_tx_ring(vsi->xdp_rings[i]);
@@ -7443,7 +7436,7 @@ int ice_vsi_open(struct ice_vsi *vsi)
ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
- if (vsi->type == ICE_VSI_PF) {
+ if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) {
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
if (err)
@@ -7452,6 +7445,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
if (err)
goto err_set_qs;
+
+ ice_vsi_set_napi_queues(vsi);
}
err = ice_up_complete(vsi);
@@ -7589,6 +7584,7 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf)
*/
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
bool dvm;
@@ -7731,6 +7727,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_rebuild_arfs(pf);
}
+ if (vsi && vsi->netdev)
+ netif_device_attach(vsi->netdev);
+
ice_update_pf_netdev_link(pf);
/* tell the firmware we are up */
@@ -7773,7 +7772,7 @@ clear_recovery:
*
* Returns 0 on success, negative on failure
*/
-static int ice_change_mtu(struct net_device *netdev, int new_mtu)
+int ice_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -8197,7 +8196,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
* @netdev: network interface device structure
* @txqueue: Tx queue
*/
-static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_tx_ring *tx_ring = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h
index a2562f04267f..b9f383494b3f 100644
--- a/drivers/net/ethernet/intel/ice/ice_osdep.h
+++ b/drivers/net/ethernet/intel/ice/ice_osdep.h
@@ -12,6 +12,7 @@
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
+#include <linux/iopoll.h>
#include <linux/pci_ids.h>
#ifndef CONFIG_64BIT
#include <linux/io-64-nonatomic-lo-hi.h>
@@ -23,6 +24,9 @@
#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
#define rd64(a, reg) readq((a)->hw_addr + (reg))
+#define rd32_poll_timeout(a, addr, val, cond, delay_us, timeout_us) \
+ read_poll_timeout(rd32, val, cond, delay_us, timeout_us, false, a, addr)
+
#define ice_flush(a) rd32((a), GLGEN_STAT)
#define ICE_M(m, s) ((m ## U) << (s))
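A usage sketch for the new rd32_poll_timeout() wrapper; the register offset and ready bit below are hypothetical:

static int example_wait_hw_ready(struct ice_hw *hw)
{
	u32 val;

	/* Poll a hypothetical status register every 10 us until bit 0
	 * reads back set, giving up after 1 ms; returns 0 on success
	 * or -ETIMEDOUT, per read_poll_timeout() semantics.
	 */
	return rd32_poll_timeout(hw, 0x000B6190 /* hypothetical reg */,
				 val, val & BIT(0), 10, 1000);
}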
@@ -39,11 +43,10 @@ struct device *ice_hw_to_dev(struct ice_hw *hw);
#define ice_debug(hw, type, fmt, args...) \
dev_dbg(ice_hw_to_dev(hw), fmt, ##args)
-#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
- print_hex_dump_debug(KBUILD_MODNAME " ", \
- DUMP_PREFIX_OFFSET, rowsize, \
- groupsize, buf, len, false)
-#else
+#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \
+ print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET, \
+ rowsize, groupsize, buf, len, false)
+#else /* CONFIG_DYNAMIC_DEBUG */
#define ice_debug(hw, type, fmt, args...) \
do { \
if ((type) & (hw)->debug_mask) \
@@ -51,16 +54,15 @@ do { \
} while (0)
#ifdef DEBUG
-#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
+#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \
do { \
if ((type) & (hw)->debug_mask) \
- print_hex_dump_debug(KBUILD_MODNAME, \
- DUMP_PREFIX_OFFSET, \
+ print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET,\
rowsize, groupsize, buf, \
len, false); \
} while (0)
-#else
-#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
+#else /* DEBUG */
+#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \
do { \
struct ice_hw *hw_l = hw; \
if ((type) & (hw_l)->debug_mask) { \
@@ -78,4 +80,10 @@ do { \
#endif /* DEBUG */
#endif /* CONFIG_DYNAMIC_DEBUG */
+#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
+ _ice_debug_array(hw, type, KBUILD_MODNAME, rowsize, groupsize, buf, len)
+
+#define ice_debug_array_w_prefix(hw, type, prefix, buf, len) \
+ _ice_debug_array(hw, type, prefix, 16, 1, buf, len)
+
#endif /* _ICE_OSDEP_H_ */
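A usage sketch for the new prefixed dump variant; the debug-mask bit is one that already appears in this patch (ICE_DBG_INIT), while the prefix string and buffer are hypothetical:

static void example_dump_buf(struct ice_hw *hw, const u8 *buf, u32 len)
{
	/* Hex-dumps "len" bytes with a caller-chosen prefix instead of
	 * KBUILD_MODNAME; rowsize/groupsize are fixed at 16/1 by the
	 * wrapper.
	 */
	ice_debug_array_w_prefix(hw, ICE_DBG_INIT, "example: ", buf, len);
}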
diff --git a/drivers/net/ethernet/intel/ice/ice_parser.c b/drivers/net/ethernet/intel/ice/ice_parser.c
new file mode 100644
index 000000000000..664beb64f557
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_parser.c
@@ -0,0 +1,2430 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "ice_common.h"
+
+struct ice_pkg_sect_hdr {
+ __le16 count;
+ __le16 offset;
+};
+
+/**
+ * ice_parser_sect_item_get - parse an item from a section
+ * @sect_type: section type
+ * @section: section object
+ * @index: index of the item to get
+ * @offset: unused; present to match the prototype of ice_pkg_enum_entry()
+ *
+ * Return: a pointer to the item or NULL.
+ */
+static void *ice_parser_sect_item_get(u32 sect_type, void *section,
+ u32 index, u32 __maybe_unused *offset)
+{
+ size_t data_off = ICE_SEC_DATA_OFFSET;
+ struct ice_pkg_sect_hdr *hdr;
+ size_t size;
+
+ if (!section)
+ return NULL;
+
+ switch (sect_type) {
+ case ICE_SID_RXPARSER_IMEM:
+ size = ICE_SID_RXPARSER_IMEM_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_METADATA_INIT:
+ size = ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_CAM:
+ size = ICE_SID_RXPARSER_CAM_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_PG_SPILL:
+ size = ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_NOMATCH_CAM:
+ size = ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_NOMATCH_SPILL:
+ size = ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_BOOST_TCAM:
+ size = ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE;
+ break;
+ case ICE_SID_LBL_RXPARSER_TMEM:
+ data_off = ICE_SEC_LBL_DATA_OFFSET;
+ size = ICE_SID_LBL_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_MARKER_PTYPE:
+ size = ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_MARKER_GRP:
+ size = ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_PROTO_GRP:
+ size = ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_FLAG_REDIR:
+ size = ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE;
+ break;
+ default:
+ return NULL;
+ }
+
+ hdr = section;
+ if (index >= le16_to_cpu(hdr->count))
+ return NULL;
+
+ return section + data_off + index * size;
+}
+
+/**
+ * ice_parser_create_table - create an item table from a section
+ * @hw: pointer to the hardware structure
+ * @sect_type: section type
+ * @item_size: item size in bytes
+ * @length: number of items in the table to create
+ * @parse_item: the function to parse the item
+ * @no_offset: ignore header offset, calculate index from 0
+ *
+ * Return: a pointer to the allocated table or ERR_PTR.
+ */
+static void *
+ice_parser_create_table(struct ice_hw *hw, u32 sect_type,
+ u32 item_size, u32 length,
+ void (*parse_item)(struct ice_hw *hw, u16 idx,
+ void *item, void *data,
+ int size), bool no_offset)
+{
+ struct ice_pkg_enum state = {};
+ struct ice_seg *seg = hw->seg;
+ void *table, *data, *item;
+ u16 idx = 0;
+
+ if (!seg)
+ return ERR_PTR(-EINVAL);
+
+ table = kzalloc(item_size * length, GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ do {
+ data = ice_pkg_enum_entry(seg, &state, sect_type, NULL,
+ ice_parser_sect_item_get);
+ seg = NULL;
+ if (data) {
+ struct ice_pkg_sect_hdr *hdr = state.sect;
+
+ if (!no_offset)
+ idx = le16_to_cpu(hdr->offset) +
+ state.entry_idx;
+
+ item = (void *)((uintptr_t)table + idx * item_size);
+ parse_item(hw, idx, item, data, item_size);
+
+ if (no_offset)
+ idx++;
+ }
+ } while (data);
+
+ return table;
+}
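A hypothetical caller sketch showing the callback shape ice_parser_create_table() expects; the table-size constant is an assumption (it is not shown in this hunk), and the item struct is the IMEM one dumped just below:

/* Hypothetical parse_item callback: unpack one raw section entry into
 * the caller's table slot (a plain copy here, for illustration).
 */
static void example_parse_item(struct ice_hw *hw, u16 idx, void *item,
			       void *data, int size)
{
	memcpy(item, data, size);
}

static struct ice_imem_item *example_create_imem_table(struct ice_hw *hw)
{
	/* ICE_IMEM_TABLE_SIZE is an assumed entry-count constant, not
	 * shown in this hunk; the result may be an ERR_PTR() on failure.
	 */
	return ice_parser_create_table(hw, ICE_SID_RXPARSER_IMEM,
				       sizeof(struct ice_imem_item),
				       ICE_IMEM_TABLE_SIZE,
				       example_parse_item, false);
}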
+
+/*** ICE_SID_RXPARSER_IMEM section ***/
+static void ice_imem_bst_bm_dump(struct ice_hw *hw, struct ice_bst_main *bm)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "boost main:\n");
+ dev_info(dev, "\talu0 = %d\n", bm->alu0);
+ dev_info(dev, "\talu1 = %d\n", bm->alu1);
+ dev_info(dev, "\talu2 = %d\n", bm->alu2);
+ dev_info(dev, "\tpg = %d\n", bm->pg);
+}
+
+static void ice_imem_bst_kb_dump(struct ice_hw *hw,
+ struct ice_bst_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "boost key builder:\n");
+ dev_info(dev, "\tpriority = %d\n", kb->prio);
+ dev_info(dev, "\ttsr_ctrl = %d\n", kb->tsr_ctrl);
+}
+
+static void ice_imem_np_kb_dump(struct ice_hw *hw,
+ struct ice_np_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "next proto key builder:\n");
+ dev_info(dev, "\topc = %d\n", kb->opc);
+ dev_info(dev, "\tstart_or_reg0 = %d\n", kb->start_reg0);
+ dev_info(dev, "\tlen_or_reg1 = %d\n", kb->len_reg1);
+}
+
+static void ice_imem_pg_kb_dump(struct ice_hw *hw,
+ struct ice_pg_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "parse graph key builder:\n");
+ dev_info(dev, "\tflag0_ena = %d\n", kb->flag0_ena);
+ dev_info(dev, "\tflag1_ena = %d\n", kb->flag1_ena);
+ dev_info(dev, "\tflag2_ena = %d\n", kb->flag2_ena);
+ dev_info(dev, "\tflag3_ena = %d\n", kb->flag3_ena);
+ dev_info(dev, "\tflag0_idx = %d\n", kb->flag0_idx);
+ dev_info(dev, "\tflag1_idx = %d\n", kb->flag1_idx);
+ dev_info(dev, "\tflag2_idx = %d\n", kb->flag2_idx);
+ dev_info(dev, "\tflag3_idx = %d\n", kb->flag3_idx);
+ dev_info(dev, "\talu_reg_idx = %d\n", kb->alu_reg_idx);
+}
+
+static void ice_imem_alu_dump(struct ice_hw *hw,
+ struct ice_alu *alu, int index)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "alu%d:\n", index);
+ dev_info(dev, "\topc = %d\n", alu->opc);
+ dev_info(dev, "\tsrc_start = %d\n", alu->src_start);
+ dev_info(dev, "\tsrc_len = %d\n", alu->src_len);
+ dev_info(dev, "\tshift_xlate_sel = %d\n", alu->shift_xlate_sel);
+ dev_info(dev, "\tshift_xlate_key = %d\n", alu->shift_xlate_key);
+ dev_info(dev, "\tsrc_reg_id = %d\n", alu->src_reg_id);
+ dev_info(dev, "\tdst_reg_id = %d\n", alu->dst_reg_id);
+ dev_info(dev, "\tinc0 = %d\n", alu->inc0);
+ dev_info(dev, "\tinc1 = %d\n", alu->inc1);
+ dev_info(dev, "\tproto_offset_opc = %d\n", alu->proto_offset_opc);
+ dev_info(dev, "\tproto_offset = %d\n", alu->proto_offset);
+ dev_info(dev, "\tbranch_addr = %d\n", alu->branch_addr);
+ dev_info(dev, "\timm = %d\n", alu->imm);
+ dev_info(dev, "\tdst_start = %d\n", alu->dst_start);
+ dev_info(dev, "\tdst_len = %d\n", alu->dst_len);
+ dev_info(dev, "\tflags_extr_imm = %d\n", alu->flags_extr_imm);
+ dev_info(dev, "\tflags_start_imm= %d\n", alu->flags_start_imm);
+}
+
+/**
+ * ice_imem_dump - dump an imem item info
+ * @hw: pointer to the hardware structure
+ * @item: imem item to dump
+ */
+static void ice_imem_dump(struct ice_hw *hw, struct ice_imem_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "index = %d\n", item->idx);
+ ice_imem_bst_bm_dump(hw, &item->b_m);
+ ice_imem_bst_kb_dump(hw, &item->b_kb);
+ dev_info(dev, "pg priority = %d\n", item->pg_prio);
+ ice_imem_np_kb_dump(hw, &item->np_kb);
+ ice_imem_pg_kb_dump(hw, &item->pg_kb);
+ ice_imem_alu_dump(hw, &item->alu0, 0);
+ ice_imem_alu_dump(hw, &item->alu1, 1);
+ ice_imem_alu_dump(hw, &item->alu2, 2);
+}
+
+#define ICE_IM_BM_ALU0 BIT(0)
+#define ICE_IM_BM_ALU1 BIT(1)
+#define ICE_IM_BM_ALU2 BIT(2)
+#define ICE_IM_BM_PG BIT(3)
+
+/**
+ * ice_imem_bm_init - parse 4 bits of Boost Main
+ * @bm: pointer to the Boost Main structure
+ * @data: Boost Main data to be parsed
+ */
+static void ice_imem_bm_init(struct ice_bst_main *bm, u8 data)
+{
+ bm->alu0 = FIELD_GET(ICE_IM_BM_ALU0, data);
+ bm->alu1 = FIELD_GET(ICE_IM_BM_ALU1, data);
+ bm->alu2 = FIELD_GET(ICE_IM_BM_ALU2, data);
+ bm->pg = FIELD_GET(ICE_IM_BM_PG, data);
+}
+
+#define ICE_IM_BKB_PRIO GENMASK(7, 0)
+#define ICE_IM_BKB_TSR_CTRL BIT(8)
+
+/**
+ * ice_imem_bkb_init - parse 10 bits of Boost Main Build
+ * @bkb: pointer to the Boost Main Build structure
+ * @data: Boost Main Build data to be parsed
+ */
+static void ice_imem_bkb_init(struct ice_bst_keybuilder *bkb, u16 data)
+{
+ bkb->prio = FIELD_GET(ICE_IM_BKB_PRIO, data);
+ bkb->tsr_ctrl = FIELD_GET(ICE_IM_BKB_TSR_CTRL, data);
+}
+
+#define ICE_IM_NPKB_OPC GENMASK(1, 0)
+#define ICE_IM_NPKB_S_R0 GENMASK(9, 2)
+#define ICE_IM_NPKB_L_R1 GENMASK(17, 10)
+
+/**
+ * ice_imem_npkb_init - parse 18 bits of Next Protocol Key Build
+ * @kb: pointer to the Next Protocol Key Build structure
+ * @data: Next Protocol Key Build data to be parsed
+ */
+static void ice_imem_npkb_init(struct ice_np_keybuilder *kb, u32 data)
+{
+ kb->opc = FIELD_GET(ICE_IM_NPKB_OPC, data);
+ kb->start_reg0 = FIELD_GET(ICE_IM_NPKB_S_R0, data);
+ kb->len_reg1 = FIELD_GET(ICE_IM_NPKB_L_R1, data);
+}
+
+#define ICE_IM_PGKB_F0_ENA BIT_ULL(0)
+#define ICE_IM_PGKB_F0_IDX GENMASK_ULL(6, 1)
+#define ICE_IM_PGKB_F1_ENA BIT_ULL(7)
+#define ICE_IM_PGKB_F1_IDX GENMASK_ULL(13, 8)
+#define ICE_IM_PGKB_F2_ENA BIT_ULL(14)
+#define ICE_IM_PGKB_F2_IDX GENMASK_ULL(20, 15)
+#define ICE_IM_PGKB_F3_ENA BIT_ULL(21)
+#define ICE_IM_PGKB_F3_IDX GENMASK_ULL(27, 22)
+#define ICE_IM_PGKB_AR_IDX GENMASK_ULL(34, 28)
+
+/**
+ * ice_imem_pgkb_init - parse 35 bits of Parse Graph Key Build
+ * @kb: pointer to the Parse Graph Key Build structure
+ * @data: Parse Graph Key Build data to be parsed
+ */
+static void ice_imem_pgkb_init(struct ice_pg_keybuilder *kb, u64 data)
+{
+ kb->flag0_ena = FIELD_GET(ICE_IM_PGKB_F0_ENA, data);
+ kb->flag0_idx = FIELD_GET(ICE_IM_PGKB_F0_IDX, data);
+ kb->flag1_ena = FIELD_GET(ICE_IM_PGKB_F1_ENA, data);
+ kb->flag1_idx = FIELD_GET(ICE_IM_PGKB_F1_IDX, data);
+ kb->flag2_ena = FIELD_GET(ICE_IM_PGKB_F2_ENA, data);
+ kb->flag2_idx = FIELD_GET(ICE_IM_PGKB_F2_IDX, data);
+ kb->flag3_ena = FIELD_GET(ICE_IM_PGKB_F3_ENA, data);
+ kb->flag3_idx = FIELD_GET(ICE_IM_PGKB_F3_IDX, data);
+ kb->alu_reg_idx = FIELD_GET(ICE_IM_PGKB_AR_IDX, data);
+}
+
+#define ICE_IM_ALU_OPC GENMASK_ULL(5, 0)
+#define ICE_IM_ALU_SS GENMASK_ULL(13, 6)
+#define ICE_IM_ALU_SL GENMASK_ULL(18, 14)
+#define ICE_IM_ALU_SXS BIT_ULL(19)
+#define ICE_IM_ALU_SXK GENMASK_ULL(23, 20)
+#define ICE_IM_ALU_SRID GENMASK_ULL(30, 24)
+#define ICE_IM_ALU_DRID GENMASK_ULL(37, 31)
+#define ICE_IM_ALU_INC0 BIT_ULL(38)
+#define ICE_IM_ALU_INC1 BIT_ULL(39)
+#define ICE_IM_ALU_POO GENMASK_ULL(41, 40)
+#define ICE_IM_ALU_PO GENMASK_ULL(49, 42)
+#define ICE_IM_ALU_BA_S 50 /* offset for the 2nd 64-bit field */
+#define ICE_IM_ALU_BA GENMASK_ULL(57 - ICE_IM_ALU_BA_S, \
+ 50 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_IMM GENMASK_ULL(73 - ICE_IM_ALU_BA_S, \
+ 58 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_DFE BIT_ULL(74 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_DS GENMASK_ULL(80 - ICE_IM_ALU_BA_S, \
+ 75 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_DL GENMASK_ULL(86 - ICE_IM_ALU_BA_S, \
+ 81 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_FEI BIT_ULL(87 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_FSI GENMASK_ULL(95 - ICE_IM_ALU_BA_S, \
+ 88 - ICE_IM_ALU_BA_S)
+
+/**
+ * ice_imem_alu_init - parse 96 bits of ALU entry
+ * @alu: pointer to the ALU entry structure
+ * @data: ALU entry data to be parsed
+ * @off: offset of the ALU entry data
+ */
+static void ice_imem_alu_init(struct ice_alu *alu, u8 *data, u8 off)
+{
+ u64 d64;
+ u8 idd;
+
+ d64 = *((u64 *)data) >> off;
+
+ alu->opc = FIELD_GET(ICE_IM_ALU_OPC, d64);
+ alu->src_start = FIELD_GET(ICE_IM_ALU_SS, d64);
+ alu->src_len = FIELD_GET(ICE_IM_ALU_SL, d64);
+ alu->shift_xlate_sel = FIELD_GET(ICE_IM_ALU_SXS, d64);
+ alu->shift_xlate_key = FIELD_GET(ICE_IM_ALU_SXK, d64);
+ alu->src_reg_id = FIELD_GET(ICE_IM_ALU_SRID, d64);
+ alu->dst_reg_id = FIELD_GET(ICE_IM_ALU_DRID, d64);
+ alu->inc0 = FIELD_GET(ICE_IM_ALU_INC0, d64);
+ alu->inc1 = FIELD_GET(ICE_IM_ALU_INC1, d64);
+ alu->proto_offset_opc = FIELD_GET(ICE_IM_ALU_POO, d64);
+ alu->proto_offset = FIELD_GET(ICE_IM_ALU_PO, d64);
+
+ idd = (ICE_IM_ALU_BA_S + off) / BITS_PER_BYTE;
+ off = (ICE_IM_ALU_BA_S + off) % BITS_PER_BYTE;
+ d64 = *((u64 *)(&data[idd])) >> off;
+
+ alu->branch_addr = FIELD_GET(ICE_IM_ALU_BA, d64);
+ alu->imm = FIELD_GET(ICE_IM_ALU_IMM, d64);
+ alu->dedicate_flags_ena = FIELD_GET(ICE_IM_ALU_DFE, d64);
+ alu->dst_start = FIELD_GET(ICE_IM_ALU_DS, d64);
+ alu->dst_len = FIELD_GET(ICE_IM_ALU_DL, d64);
+ alu->flags_extr_imm = FIELD_GET(ICE_IM_ALU_FEI, d64);
+ alu->flags_start_imm = FIELD_GET(ICE_IM_ALU_FSI, d64);
+}
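+
+/* Note: the branch_addr group begins at bit ICE_IM_ALU_BA_S (50) of the
+ * ALU entry, so the second 64-bit read starts at byte (50 + off) / 8 with
+ * a residual shift of (50 + off) % 8; for ALU0 (off = 5) that is byte 6,
+ * shift 7.
+ */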
+
+#define ICE_IMEM_BM_S 0
+#define ICE_IMEM_BKB_S 4
+#define ICE_IMEM_BKB_IDD (ICE_IMEM_BKB_S / BITS_PER_BYTE)
+#define ICE_IMEM_BKB_OFF (ICE_IMEM_BKB_S % BITS_PER_BYTE)
+#define ICE_IMEM_PGP GENMASK(15, 14)
+#define ICE_IMEM_NPKB_S 16
+#define ICE_IMEM_NPKB_IDD (ICE_IMEM_NPKB_S / BITS_PER_BYTE)
+#define ICE_IMEM_NPKB_OFF (ICE_IMEM_NPKB_S % BITS_PER_BYTE)
+#define ICE_IMEM_PGKB_S 34
+#define ICE_IMEM_PGKB_IDD (ICE_IMEM_PGKB_S / BITS_PER_BYTE)
+#define ICE_IMEM_PGKB_OFF (ICE_IMEM_PGKB_S % BITS_PER_BYTE)
+#define ICE_IMEM_ALU0_S 69
+#define ICE_IMEM_ALU0_IDD (ICE_IMEM_ALU0_S / BITS_PER_BYTE)
+#define ICE_IMEM_ALU0_OFF (ICE_IMEM_ALU0_S % BITS_PER_BYTE)
+#define ICE_IMEM_ALU1_S 165
+#define ICE_IMEM_ALU1_IDD (ICE_IMEM_ALU1_S / BITS_PER_BYTE)
+#define ICE_IMEM_ALU1_OFF (ICE_IMEM_ALU1_S % BITS_PER_BYTE)
+#define ICE_IMEM_ALU2_S 357
+#define ICE_IMEM_ALU2_IDD (ICE_IMEM_ALU2_S / BITS_PER_BYTE)
+#define ICE_IMEM_ALU2_OFF (ICE_IMEM_ALU2_S % BITS_PER_BYTE)
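+
+/* Each *_IDD/*_OFF pair converts an absolute bit position within the
+ * 384-bit IMEM entry into a byte index and a residual bit shift, e.g.
+ * ALU0 starts at bit 69, i.e. byte 8, bit 5.
+ */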
+
+/**
+ * ice_imem_parse_item - parse 384 bits of IMEM entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of IMEM entry
+ * @item: item of IMEM entry
+ * @data: IMEM entry data to be parsed
+ * @size: size of IMEM entry
+ */
+static void ice_imem_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_imem_item *ii = item;
+ u8 *buf = data;
+
+ ii->idx = idx;
+
+ ice_imem_bm_init(&ii->b_m, *(u8 *)buf);
+ ice_imem_bkb_init(&ii->b_kb,
+ *((u16 *)(&buf[ICE_IMEM_BKB_IDD])) >>
+ ICE_IMEM_BKB_OFF);
+
+ ii->pg_prio = FIELD_GET(ICE_IMEM_PGP, *(u16 *)buf);
+
+ ice_imem_npkb_init(&ii->np_kb,
+ *((u32 *)(&buf[ICE_IMEM_NPKB_IDD])) >>
+ ICE_IMEM_NPKB_OFF);
+ ice_imem_pgkb_init(&ii->pg_kb,
+ *((u64 *)(&buf[ICE_IMEM_PGKB_IDD])) >>
+ ICE_IMEM_PGKB_OFF);
+
+ ice_imem_alu_init(&ii->alu0,
+ &buf[ICE_IMEM_ALU0_IDD],
+ ICE_IMEM_ALU0_OFF);
+ ice_imem_alu_init(&ii->alu1,
+ &buf[ICE_IMEM_ALU1_IDD],
+ ICE_IMEM_ALU1_OFF);
+ ice_imem_alu_init(&ii->alu2,
+ &buf[ICE_IMEM_ALU2_IDD],
+ ICE_IMEM_ALU2_OFF);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_imem_dump(hw, ii);
+}
+
+/**
+ * ice_imem_table_get - create an imem table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated IMEM table.
+ */
+static struct ice_imem_item *ice_imem_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_IMEM,
+ sizeof(struct ice_imem_item),
+ ICE_IMEM_TABLE_SIZE,
+ ice_imem_parse_item, false);
+}
+
+/*** ICE_SID_RXPARSER_METADATA_INIT section ***/
+/**
+ * ice_metainit_dump - dump a metainit item info
+ * @hw: pointer to the hardware structure
+ * @item: metainit item to dump
+ */
+static void ice_metainit_dump(struct ice_hw *hw, struct ice_metainit_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "index = %d\n", item->idx);
+
+ dev_info(dev, "tsr = %d\n", item->tsr);
+ dev_info(dev, "ho = %d\n", item->ho);
+ dev_info(dev, "pc = %d\n", item->pc);
+ dev_info(dev, "pg_rn = %d\n", item->pg_rn);
+ dev_info(dev, "cd = %d\n", item->cd);
+
+ dev_info(dev, "gpr_a_ctrl = %d\n", item->gpr_a_ctrl);
+ dev_info(dev, "gpr_a_data_mdid = %d\n", item->gpr_a_data_mdid);
+ dev_info(dev, "gpr_a_data_start = %d\n", item->gpr_a_data_start);
+ dev_info(dev, "gpr_a_data_len = %d\n", item->gpr_a_data_len);
+ dev_info(dev, "gpr_a_id = %d\n", item->gpr_a_id);
+
+ dev_info(dev, "gpr_b_ctrl = %d\n", item->gpr_b_ctrl);
+ dev_info(dev, "gpr_b_data_mdid = %d\n", item->gpr_b_data_mdid);
+ dev_info(dev, "gpr_b_data_start = %d\n", item->gpr_b_data_start);
+ dev_info(dev, "gpr_b_data_len = %d\n", item->gpr_b_data_len);
+ dev_info(dev, "gpr_b_id = %d\n", item->gpr_b_id);
+
+ dev_info(dev, "gpr_c_ctrl = %d\n", item->gpr_c_ctrl);
+ dev_info(dev, "gpr_c_data_mdid = %d\n", item->gpr_c_data_mdid);
+ dev_info(dev, "gpr_c_data_start = %d\n", item->gpr_c_data_start);
+ dev_info(dev, "gpr_c_data_len = %d\n", item->gpr_c_data_len);
+ dev_info(dev, "gpr_c_id = %d\n", item->gpr_c_id);
+
+ dev_info(dev, "gpr_d_ctrl = %d\n", item->gpr_d_ctrl);
+ dev_info(dev, "gpr_d_data_mdid = %d\n", item->gpr_d_data_mdid);
+ dev_info(dev, "gpr_d_data_start = %d\n", item->gpr_d_data_start);
+ dev_info(dev, "gpr_d_data_len = %d\n", item->gpr_d_data_len);
+ dev_info(dev, "gpr_d_id = %d\n", item->gpr_d_id);
+
+ dev_info(dev, "flags = 0x%llx\n", (unsigned long long)(item->flags));
+}
+
+#define ICE_MI_TSR GENMASK_ULL(7, 0)
+#define ICE_MI_HO GENMASK_ULL(16, 8)
+#define ICE_MI_PC GENMASK_ULL(24, 17)
+#define ICE_MI_PGRN GENMASK_ULL(35, 25)
+#define ICE_MI_CD GENMASK_ULL(38, 36)
+#define ICE_MI_GAC BIT_ULL(39)
+#define ICE_MI_GADM GENMASK_ULL(44, 40)
+#define ICE_MI_GADS GENMASK_ULL(48, 45)
+#define ICE_MI_GADL GENMASK_ULL(53, 49)
+#define ICE_MI_GAI GENMASK_ULL(59, 56)
+#define ICE_MI_GBC BIT_ULL(60)
+#define ICE_MI_GBDM_S 61 /* offset for the 2nd 64-bit field */
+#define ICE_MI_GBDM_IDD (ICE_MI_GBDM_S / BITS_PER_BYTE)
+#define ICE_MI_GBDM_OFF (ICE_MI_GBDM_S % BITS_PER_BYTE)
+
+#define ICE_MI_GBDM_GENMASK_ULL(high, low) \
+ GENMASK_ULL((high) - ICE_MI_GBDM_S, (low) - ICE_MI_GBDM_S)
+#define ICE_MI_GBDM ICE_MI_GBDM_GENMASK_ULL(65, 61)
+#define ICE_MI_GBDS ICE_MI_GBDM_GENMASK_ULL(69, 66)
+#define ICE_MI_GBDL ICE_MI_GBDM_GENMASK_ULL(74, 70)
+#define ICE_MI_GBI ICE_MI_GBDM_GENMASK_ULL(80, 77)
+#define ICE_MI_GCC BIT_ULL(81 - ICE_MI_GBDM_S)
+#define ICE_MI_GCDM ICE_MI_GBDM_GENMASK_ULL(86, 82)
+#define ICE_MI_GCDS ICE_MI_GBDM_GENMASK_ULL(90, 87)
+#define ICE_MI_GCDL ICE_MI_GBDM_GENMASK_ULL(95, 91)
+#define ICE_MI_GCI ICE_MI_GBDM_GENMASK_ULL(101, 98)
+#define ICE_MI_GDC BIT_ULL(102 - ICE_MI_GBDM_S)
+#define ICE_MI_GDDM ICE_MI_GBDM_GENMASK_ULL(107, 103)
+#define ICE_MI_GDDS ICE_MI_GBDM_GENMASK_ULL(111, 108)
+#define ICE_MI_GDDL ICE_MI_GBDM_GENMASK_ULL(116, 112)
+#define ICE_MI_GDI ICE_MI_GBDM_GENMASK_ULL(122, 119)
+#define ICE_MI_FLAG_S 123 /* offset for the 3rd 64-bit field */
+#define ICE_MI_FLAG_IDD (ICE_MI_FLAG_S / BITS_PER_BYTE)
+#define ICE_MI_FLAG_OFF (ICE_MI_FLAG_S % BITS_PER_BYTE)
+#define ICE_MI_FLAG GENMASK_ULL(186 - ICE_MI_FLAG_S, \
+ 123 - ICE_MI_FLAG_S)
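+
+/* The masks above are expressed relative to the 64-bit read that contains
+ * them: e.g. ICE_MI_GBDS covers absolute bits 69:66 of the entry, which
+ * becomes GENMASK_ULL(8, 5) once rebased on ICE_MI_GBDM_S (61).
+ */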
+
+/**
+ * ice_metainit_parse_item - parse 192 bits of Metadata Init entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Metadata Init entry
+ * @item: item of Metadata Init entry
+ * @data: Metadata Init entry data to be parsed
+ * @size: size of Metadata Init entry
+ */
+static void ice_metainit_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_metainit_item *mi = item;
+ u8 *buf = data;
+ u64 d64;
+
+ mi->idx = idx;
+
+ d64 = *(u64 *)buf;
+
+ mi->tsr = FIELD_GET(ICE_MI_TSR, d64);
+ mi->ho = FIELD_GET(ICE_MI_HO, d64);
+ mi->pc = FIELD_GET(ICE_MI_PC, d64);
+ mi->pg_rn = FIELD_GET(ICE_MI_PGRN, d64);
+ mi->cd = FIELD_GET(ICE_MI_CD, d64);
+
+ mi->gpr_a_ctrl = FIELD_GET(ICE_MI_GAC, d64);
+ mi->gpr_a_data_mdid = FIELD_GET(ICE_MI_GADM, d64);
+ mi->gpr_a_data_start = FIELD_GET(ICE_MI_GADS, d64);
+ mi->gpr_a_data_len = FIELD_GET(ICE_MI_GADL, d64);
+ mi->gpr_a_id = FIELD_GET(ICE_MI_GAI, d64);
+
+ mi->gpr_b_ctrl = FIELD_GET(ICE_MI_GBC, d64);
+
+ d64 = *((u64 *)&buf[ICE_MI_GBDM_IDD]) >> ICE_MI_GBDM_OFF;
+
+ mi->gpr_b_data_mdid = FIELD_GET(ICE_MI_GBDM, d64);
+ mi->gpr_b_data_start = FIELD_GET(ICE_MI_GBDS, d64);
+ mi->gpr_b_data_len = FIELD_GET(ICE_MI_GBDL, d64);
+ mi->gpr_b_id = FIELD_GET(ICE_MI_GBI, d64);
+
+ mi->gpr_c_ctrl = FIELD_GET(ICE_MI_GCC, d64);
+ mi->gpr_c_data_mdid = FIELD_GET(ICE_MI_GCDM, d64);
+ mi->gpr_c_data_start = FIELD_GET(ICE_MI_GCDS, d64);
+ mi->gpr_c_data_len = FIELD_GET(ICE_MI_GCDL, d64);
+ mi->gpr_c_id = FIELD_GET(ICE_MI_GCI, d64);
+
+ mi->gpr_d_ctrl = FIELD_GET(ICE_MI_GDC, d64);
+ mi->gpr_d_data_mdid = FIELD_GET(ICE_MI_GDDM, d64);
+ mi->gpr_d_data_start = FIELD_GET(ICE_MI_GDDS, d64);
+ mi->gpr_d_data_len = FIELD_GET(ICE_MI_GDDL, d64);
+ mi->gpr_d_id = FIELD_GET(ICE_MI_GDI, d64);
+
+ d64 = *((u64 *)&buf[ICE_MI_FLAG_IDD]) >> ICE_MI_FLAG_OFF;
+
+ mi->flags = FIELD_GET(ICE_MI_FLAG, d64);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_metainit_dump(hw, mi);
+}
+
+/**
+ * ice_metainit_table_get - create a metainit table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Metadata initialization table.
+ */
+static struct ice_metainit_item *ice_metainit_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_METADATA_INIT,
+ sizeof(struct ice_metainit_item),
+ ICE_METAINIT_TABLE_SIZE,
+ ice_metainit_parse_item, false);
+}
+
+/**
+ * ice_bst_tcam_search - find a TCAM item with a specific type
+ * @tcam_table: the TCAM table
+ * @lbl_table: the lbl table to search
+ * @type: the type we need to match against
+ * @start: start searching from this index
+ *
+ * Return: a pointer to the matching BOOST TCAM item or NULL.
+ */
+struct ice_bst_tcam_item *
+ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table,
+ struct ice_lbl_item *lbl_table,
+ enum ice_lbl_type type, u16 *start)
+{
+ u16 i = *start;
+
+ for (; i < ICE_BST_TCAM_TABLE_SIZE; i++) {
+ if (lbl_table[i].type == type) {
+ *start = i;
+ return &tcam_table[lbl_table[i].idx];
+ }
+ }
+
+ return NULL;
+}
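+
+/* Usage sketch (tcam and lbl stand for caller-provided tables): the hit
+ * index is stored back through @start, so a caller walking all matches of
+ * one type must advance the index itself:
+ *
+ *     u16 i = 0;
+ *
+ *     while (ice_bst_tcam_search(tcam, lbl, ICE_LBL_BST_TYPE_VXLAN, &i))
+ *             i++;
+ */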
+
+/*** ICE_SID_RXPARSER_CAM, ICE_SID_RXPARSER_PG_SPILL,
+ * ICE_SID_RXPARSER_NOMATCH_CAM and ICE_SID_RXPARSER_NOMATCH_SPILL
+ * sections ***/
+static void ice_pg_cam_key_dump(struct ice_hw *hw, struct ice_pg_cam_key *key)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "key:\n");
+ dev_info(dev, "\tvalid = %d\n", key->valid);
+ dev_info(dev, "\tnode_id = %d\n", key->node_id);
+ dev_info(dev, "\tflag0 = %d\n", key->flag0);
+ dev_info(dev, "\tflag1 = %d\n", key->flag1);
+ dev_info(dev, "\tflag2 = %d\n", key->flag2);
+ dev_info(dev, "\tflag3 = %d\n", key->flag3);
+ dev_info(dev, "\tboost_idx = %d\n", key->boost_idx);
+ dev_info(dev, "\talu_reg = 0x%04x\n", key->alu_reg);
+ dev_info(dev, "\tnext_proto = 0x%08x\n", key->next_proto);
+}
+
+static void ice_pg_nm_cam_key_dump(struct ice_hw *hw,
+ struct ice_pg_nm_cam_key *key)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "key:\n");
+ dev_info(dev, "\tvalid = %d\n", key->valid);
+ dev_info(dev, "\tnode_id = %d\n", key->node_id);
+ dev_info(dev, "\tflag0 = %d\n", key->flag0);
+ dev_info(dev, "\tflag1 = %d\n", key->flag1);
+ dev_info(dev, "\tflag2 = %d\n", key->flag2);
+ dev_info(dev, "\tflag3 = %d\n", key->flag3);
+ dev_info(dev, "\tboost_idx = %d\n", key->boost_idx);
+ dev_info(dev, "\talu_reg = 0x%04x\n", key->alu_reg);
+}
+
+static void ice_pg_cam_action_dump(struct ice_hw *hw,
+ struct ice_pg_cam_action *action)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "action:\n");
+ dev_info(dev, "\tnext_node = %d\n", action->next_node);
+ dev_info(dev, "\tnext_pc = %d\n", action->next_pc);
+ dev_info(dev, "\tis_pg = %d\n", action->is_pg);
+ dev_info(dev, "\tproto_id = %d\n", action->proto_id);
+ dev_info(dev, "\tis_mg = %d\n", action->is_mg);
+ dev_info(dev, "\tmarker_id = %d\n", action->marker_id);
+ dev_info(dev, "\tis_last_round = %d\n", action->is_last_round);
+ dev_info(dev, "\tho_polarity = %d\n", action->ho_polarity);
+ dev_info(dev, "\tho_inc = %d\n", action->ho_inc);
+}
+
+/**
+ * ice_pg_cam_dump - dump a parse graph cam info
+ * @hw: pointer to the hardware structure
+ * @item: parse graph cam to dump
+ */
+static void ice_pg_cam_dump(struct ice_hw *hw, struct ice_pg_cam_item *item)
+{
+ dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
+ ice_pg_cam_key_dump(hw, &item->key);
+ ice_pg_cam_action_dump(hw, &item->action);
+}
+
+/**
+ * ice_pg_nm_cam_dump - dump a parse graph no match cam info
+ * @hw: pointer to the hardware structure
+ * @item: parse graph no match cam to dump
+ */
+static void ice_pg_nm_cam_dump(struct ice_hw *hw,
+ struct ice_pg_nm_cam_item *item)
+{
+ dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
+ ice_pg_nm_cam_key_dump(hw, &item->key);
+ ice_pg_cam_action_dump(hw, &item->action);
+}
+
+#define ICE_PGCA_NN GENMASK_ULL(10, 0)
+#define ICE_PGCA_NPC GENMASK_ULL(18, 11)
+#define ICE_PGCA_IPG BIT_ULL(19)
+#define ICE_PGCA_PID GENMASK_ULL(30, 23)
+#define ICE_PGCA_IMG BIT_ULL(31)
+#define ICE_PGCA_MID GENMASK_ULL(39, 32)
+#define ICE_PGCA_ILR BIT_ULL(40)
+#define ICE_PGCA_HOP BIT_ULL(41)
+#define ICE_PGCA_HOI GENMASK_ULL(50, 42)
+
+/**
+ * ice_pg_cam_action_init - parse 55 bits of Parse Graph CAM Action
+ * @action: pointer to the Parse Graph CAM Action structure
+ * @data: Parse Graph CAM Action data to be parsed
+ */
+static void ice_pg_cam_action_init(struct ice_pg_cam_action *action, u64 data)
+{
+ action->next_node = FIELD_GET(ICE_PGCA_NN, data);
+ action->next_pc = FIELD_GET(ICE_PGCA_NPC, data);
+ action->is_pg = FIELD_GET(ICE_PGCA_IPG, data);
+ action->proto_id = FIELD_GET(ICE_PGCA_PID, data);
+ action->is_mg = FIELD_GET(ICE_PGCA_IMG, data);
+ action->marker_id = FIELD_GET(ICE_PGCA_MID, data);
+ action->is_last_round = FIELD_GET(ICE_PGCA_ILR, data);
+ action->ho_polarity = FIELD_GET(ICE_PGCA_HOP, data);
+ action->ho_inc = FIELD_GET(ICE_PGCA_HOI, data);
+}
+
+#define ICE_PGNCK_VLD BIT_ULL(0)
+#define ICE_PGNCK_NID GENMASK_ULL(11, 1)
+#define ICE_PGNCK_F0 BIT_ULL(12)
+#define ICE_PGNCK_F1 BIT_ULL(13)
+#define ICE_PGNCK_F2 BIT_ULL(14)
+#define ICE_PGNCK_F3 BIT_ULL(15)
+#define ICE_PGNCK_BH BIT_ULL(16)
+#define ICE_PGNCK_BI GENMASK_ULL(24, 17)
+#define ICE_PGNCK_AR GENMASK_ULL(40, 25)
+
+/**
+ * ice_pg_nm_cam_key_init - parse 41 bits of Parse Graph NoMatch CAM Key
+ * @key: pointer to the Parse Graph NoMatch CAM Key structure
+ * @data: Parse Graph NoMatch CAM Key data to be parsed
+ */
+static void ice_pg_nm_cam_key_init(struct ice_pg_nm_cam_key *key, u64 data)
+{
+ key->valid = FIELD_GET(ICE_PGNCK_VLD, data);
+ key->node_id = FIELD_GET(ICE_PGNCK_NID, data);
+ key->flag0 = FIELD_GET(ICE_PGNCK_F0, data);
+ key->flag1 = FIELD_GET(ICE_PGNCK_F1, data);
+ key->flag2 = FIELD_GET(ICE_PGNCK_F2, data);
+ key->flag3 = FIELD_GET(ICE_PGNCK_F3, data);
+
+ if (FIELD_GET(ICE_PGNCK_BH, data))
+ key->boost_idx = FIELD_GET(ICE_PGNCK_BI, data);
+ else
+ key->boost_idx = 0;
+
+ key->alu_reg = FIELD_GET(ICE_PGNCK_AR, data);
+}
+
+#define ICE_PGCK_VLD BIT_ULL(0)
+#define ICE_PGCK_NID GENMASK_ULL(11, 1)
+#define ICE_PGCK_F0 BIT_ULL(12)
+#define ICE_PGCK_F1 BIT_ULL(13)
+#define ICE_PGCK_F2 BIT_ULL(14)
+#define ICE_PGCK_F3 BIT_ULL(15)
+#define ICE_PGCK_BH BIT_ULL(16)
+#define ICE_PGCK_BI GENMASK_ULL(24, 17)
+#define ICE_PGCK_AR GENMASK_ULL(40, 25)
+#define ICE_PGCK_NPK_S 41 /* offset for the 2nd 64-bit field */
+#define ICE_PGCK_NPK_IDD (ICE_PGCK_NPK_S / BITS_PER_BYTE)
+#define ICE_PGCK_NPK_OFF (ICE_PGCK_NPK_S % BITS_PER_BYTE)
+#define ICE_PGCK_NPK GENMASK_ULL(72 - ICE_PGCK_NPK_S, \
+ 41 - ICE_PGCK_NPK_S)
+
+/**
+ * ice_pg_cam_key_init - parse 73 bits of Parse Graph CAM Key
+ * @key: pointer to the Parse Graph CAM Key structure
+ * @data: Parse Graph CAM Key data to be parsed
+ */
+static void ice_pg_cam_key_init(struct ice_pg_cam_key *key, u8 *data)
+{
+ u64 d64 = *(u64 *)data;
+
+ key->valid = FIELD_GET(ICE_PGCK_VLD, d64);
+ key->node_id = FIELD_GET(ICE_PGCK_NID, d64);
+ key->flag0 = FIELD_GET(ICE_PGCK_F0, d64);
+ key->flag1 = FIELD_GET(ICE_PGCK_F1, d64);
+ key->flag2 = FIELD_GET(ICE_PGCK_F2, d64);
+ key->flag3 = FIELD_GET(ICE_PGCK_F3, d64);
+
+ if (FIELD_GET(ICE_PGCK_BH, d64))
+ key->boost_idx = FIELD_GET(ICE_PGCK_BI, d64);
+ else
+ key->boost_idx = 0;
+
+ key->alu_reg = FIELD_GET(ICE_PGCK_AR, d64);
+
+ d64 = *((u64 *)&data[ICE_PGCK_NPK_IDD]) >> ICE_PGCK_NPK_OFF;
+
+ key->next_proto = FIELD_GET(ICE_PGCK_NPK, d64);
+}
+
+#define ICE_PG_CAM_ACT_S 73
+#define ICE_PG_CAM_ACT_IDD (ICE_PG_CAM_ACT_S / BITS_PER_BYTE)
+#define ICE_PG_CAM_ACT_OFF (ICE_PG_CAM_ACT_S % BITS_PER_BYTE)
+
+/**
+ * ice_pg_cam_parse_item - parse 128 bits of Parse Graph CAM Entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Parse Graph CAM Entry
+ * @item: item of Parse Graph CAM Entry
+ * @data: Parse Graph CAM Entry data to be parsed
+ * @size: size of Parse Graph CAM Entry
+ */
+static void ice_pg_cam_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_pg_cam_item *ci = item;
+ u8 *buf = data;
+ u64 d64;
+
+ ci->idx = idx;
+
+ ice_pg_cam_key_init(&ci->key, buf);
+
+ d64 = *((u64 *)&buf[ICE_PG_CAM_ACT_IDD]) >> ICE_PG_CAM_ACT_OFF;
+ ice_pg_cam_action_init(&ci->action, d64);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_pg_cam_dump(hw, ci);
+}
+
+#define ICE_PG_SP_CAM_KEY_S 56
+#define ICE_PG_SP_CAM_KEY_IDD (ICE_PG_SP_CAM_KEY_S / BITS_PER_BYTE)
+
+/**
+ * ice_pg_sp_cam_parse_item - parse 136 bits of Parse Graph Spill CAM Entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Parse Graph Spill CAM Entry
+ * @item: item of Parse Graph Spill CAM Entry
+ * @data: Parse Graph Spill CAM Entry data to be parsed
+ * @size: size of Parse Graph Spill CAM Entry
+ */
+static void ice_pg_sp_cam_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_pg_cam_item *ci = item;
+ u8 *buf = data;
+ u64 d64;
+
+ ci->idx = idx;
+
+ d64 = *(u64 *)buf;
+ ice_pg_cam_action_init(&ci->action, d64);
+
+ ice_pg_cam_key_init(&ci->key, &buf[ICE_PG_SP_CAM_KEY_IDD]);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_pg_cam_dump(hw, ci);
+}
+
+#define ICE_PG_NM_CAM_ACT_S 41
+#define ICE_PG_NM_CAM_ACT_IDD (ICE_PG_NM_CAM_ACT_S / BITS_PER_BYTE)
+#define ICE_PG_NM_CAM_ACT_OFF (ICE_PG_NM_CAM_ACT_S % BITS_PER_BYTE)
+
+/**
+ * ice_pg_nm_cam_parse_item - parse 96 bits of Parse Graph NoMatch CAM Entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Parse Graph NoMatch CAM Entry
+ * @item: item of Parse Graph NoMatch CAM Entry
+ * @data: Parse Graph NoMatch CAM Entry data to be parsed
+ * @size: size of Parse Graph NoMatch CAM Entry
+ */
+static void ice_pg_nm_cam_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_pg_nm_cam_item *ci = item;
+ u8 *buf = data;
+ u64 d64;
+
+ ci->idx = idx;
+
+ d64 = *(u64 *)buf;
+ ice_pg_nm_cam_key_init(&ci->key, d64);
+
+ d64 = *((u64 *)&buf[ICE_PG_NM_CAM_ACT_IDD]) >> ICE_PG_NM_CAM_ACT_OFF;
+ ice_pg_cam_action_init(&ci->action, d64);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_pg_nm_cam_dump(hw, ci);
+}
+
+#define ICE_PG_NM_SP_CAM_ACT_S 56
+#define ICE_PG_NM_SP_CAM_ACT_IDD (ICE_PG_NM_SP_CAM_ACT_S / BITS_PER_BYTE)
+#define ICE_PG_NM_SP_CAM_ACT_OFF (ICE_PG_NM_SP_CAM_ACT_S % BITS_PER_BYTE)
+
+/**
+ * ice_pg_nm_sp_cam_parse_item - parse 104 bits of Parse Graph NoMatch Spill
+ * CAM Entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Parse Graph NoMatch Spill CAM Entry
+ * @item: item of Parse Graph NoMatch Spill CAM Entry
+ * @data: Parse Graph NoMatch Spill CAM Entry data to be parsed
+ * @size: size of Parse Graph NoMatch Spill CAM Entry
+ */
+static void ice_pg_nm_sp_cam_parse_item(struct ice_hw *hw, u16 idx,
+ void *item, void *data,
+ int __maybe_unused size)
+{
+ struct ice_pg_nm_cam_item *ci = item;
+ u8 *buf = data;
+ u64 d64;
+
+ ci->idx = idx;
+
+ d64 = *(u64 *)buf;
+ ice_pg_cam_action_init(&ci->action, d64);
+
+ d64 = *((u64 *)&buf[ICE_PG_NM_SP_CAM_ACT_IDD]) >>
+ ICE_PG_NM_SP_CAM_ACT_OFF;
+ ice_pg_nm_cam_key_init(&ci->key, d64);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_pg_nm_cam_dump(hw, ci);
+}
+
+/**
+ * ice_pg_cam_table_get - create a parse graph cam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Parse Graph CAM table.
+ */
+static struct ice_pg_cam_item *ice_pg_cam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_CAM,
+ sizeof(struct ice_pg_cam_item),
+ ICE_PG_CAM_TABLE_SIZE,
+ ice_pg_cam_parse_item, false);
+}
+
+/**
+ * ice_pg_sp_cam_table_get - create a parse graph spill cam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Parse Graph Spill CAM table.
+ */
+static struct ice_pg_cam_item *ice_pg_sp_cam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_PG_SPILL,
+ sizeof(struct ice_pg_cam_item),
+ ICE_PG_SP_CAM_TABLE_SIZE,
+ ice_pg_sp_cam_parse_item, false);
+}
+
+/**
+ * ice_pg_nm_cam_table_get - create a parse graph no match cam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Parse Graph No Match CAM table.
+ */
+static struct ice_pg_nm_cam_item *ice_pg_nm_cam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_CAM,
+ sizeof(struct ice_pg_nm_cam_item),
+ ICE_PG_NM_CAM_TABLE_SIZE,
+ ice_pg_nm_cam_parse_item, false);
+}
+
+/**
+ * ice_pg_nm_sp_cam_table_get - create a parse graph no match spill cam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Parse Graph No Match Spill CAM table.
+ */
+static struct ice_pg_nm_cam_item *ice_pg_nm_sp_cam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_SPILL,
+ sizeof(struct ice_pg_nm_cam_item),
+ ICE_PG_NM_SP_CAM_TABLE_SIZE,
+ ice_pg_nm_sp_cam_parse_item, false);
+}
+
+static bool __ice_pg_cam_match(struct ice_pg_cam_item *item,
+ struct ice_pg_cam_key *key)
+{
+ return (item->key.valid &&
+ !memcmp(&item->key.val, &key->val, sizeof(key->val)));
+}
+
+static bool __ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *item,
+ struct ice_pg_cam_key *key)
+{
+ return (item->key.valid &&
+ !memcmp(&item->key.val, &key->val, sizeof(item->key.val)));
+}
+
+/**
+ * ice_pg_cam_match - search parse graph cam table by key
+ * @table: parse graph cam table to search
+ * @size: cam table size
+ * @key: search key
+ *
+ * Return: a pointer to the matching PG CAM item or NULL.
+ */
+struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table,
+ int size, struct ice_pg_cam_key *key)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ struct ice_pg_cam_item *item = &table[i];
+
+ if (__ice_pg_cam_match(item, key))
+ return item;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_pg_nm_cam_match - search parse graph no match cam table by key
+ * @table: parse graph no match cam table to search
+ * @size: cam table size
+ * @key: search key
+ *
+ * Return: a pointer to the matching PG No Match CAM item or NULL.
+ */
+struct ice_pg_nm_cam_item *
+ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
+ struct ice_pg_cam_key *key)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ struct ice_pg_nm_cam_item *item = &table[i];
+
+ if (__ice_pg_nm_cam_match(item, key))
+ return item;
+ }
+
+ return NULL;
+}
+
+/*** Ternary match ***/
+/* Perform a ternary match on a 1-byte pattern (@pat) given @key and @key_inv
+ * Rules (per bit):
+ * Key == 0 and Key_inv == 0 : Never match (Don't care)
+ * Key == 0 and Key_inv == 1 : Match on bit == 1
+ * Key == 1 and Key_inv == 0 : Match on bit == 0
+ * Key == 1 and Key_inv == 1 : Always match (Don't care)
+ *
+ * Return: true if all bits match, false otherwise.
+ */
+static bool ice_ternary_match_byte(u8 key, u8 key_inv, u8 pat)
+{
+ u8 bit_key, bit_key_inv, bit_pat;
+ int i;
+
+ for (i = 0; i < BITS_PER_BYTE; i++) {
+ bit_key = key & BIT(i);
+ bit_key_inv = key_inv & BIT(i);
+ bit_pat = pat & BIT(i);
+
+ if (bit_key != 0 && bit_key_inv != 0)
+ continue;
+
+ if ((bit_key == 0 && bit_key_inv == 0) || bit_key == bit_pat)
+ return false;
+ }
+
+ return true;
+}
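+
+/* Worked example: key = 0x0f with key_inv = 0xf0 matches exactly
+ * pat = 0xf0: bits 0-3 (key = 1, key_inv = 0) require the pattern bit to
+ * be 0, while bits 4-7 (key = 0, key_inv = 1) require it to be 1.
+ */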
+
+static bool ice_ternary_match(const u8 *key, const u8 *key_inv,
+ const u8 *pat, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (!ice_ternary_match_byte(key[i], key_inv[i], pat[i]))
+ return false;
+
+ return true;
+}
+
+/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/
+static void ice_bst_np_kb_dump(struct ice_hw *hw, struct ice_np_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "next proto key builder:\n");
+ dev_info(dev, "\topc = %d\n", kb->opc);
+ dev_info(dev, "\tstart_reg0 = %d\n", kb->start_reg0);
+ dev_info(dev, "\tlen_reg1 = %d\n", kb->len_reg1);
+}
+
+static void ice_bst_pg_kb_dump(struct ice_hw *hw, struct ice_pg_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "parse graph key builder:\n");
+ dev_info(dev, "\tflag0_ena = %d\n", kb->flag0_ena);
+ dev_info(dev, "\tflag1_ena = %d\n", kb->flag1_ena);
+ dev_info(dev, "\tflag2_ena = %d\n", kb->flag2_ena);
+ dev_info(dev, "\tflag3_ena = %d\n", kb->flag3_ena);
+ dev_info(dev, "\tflag0_idx = %d\n", kb->flag0_idx);
+ dev_info(dev, "\tflag1_idx = %d\n", kb->flag1_idx);
+ dev_info(dev, "\tflag2_idx = %d\n", kb->flag2_idx);
+ dev_info(dev, "\tflag3_idx = %d\n", kb->flag3_idx);
+ dev_info(dev, "\talu_reg_idx = %d\n", kb->alu_reg_idx);
+}
+
+static void ice_bst_alu_dump(struct ice_hw *hw, struct ice_alu *alu, int idx)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "alu%d:\n", idx);
+ dev_info(dev, "\topc = %d\n", alu->opc);
+ dev_info(dev, "\tsrc_start = %d\n", alu->src_start);
+ dev_info(dev, "\tsrc_len = %d\n", alu->src_len);
+ dev_info(dev, "\tshift_xlate_sel = %d\n", alu->shift_xlate_sel);
+ dev_info(dev, "\tshift_xlate_key = %d\n", alu->shift_xlate_key);
+ dev_info(dev, "\tsrc_reg_id = %d\n", alu->src_reg_id);
+ dev_info(dev, "\tdst_reg_id = %d\n", alu->dst_reg_id);
+ dev_info(dev, "\tinc0 = %d\n", alu->inc0);
+ dev_info(dev, "\tinc1 = %d\n", alu->inc1);
+ dev_info(dev, "\tproto_offset_opc = %d\n", alu->proto_offset_opc);
+ dev_info(dev, "\tproto_offset = %d\n", alu->proto_offset);
+ dev_info(dev, "\tbranch_addr = %d\n", alu->branch_addr);
+ dev_info(dev, "\timm = %d\n", alu->imm);
+ dev_info(dev, "\tdst_start = %d\n", alu->dst_start);
+ dev_info(dev, "\tdst_len = %d\n", alu->dst_len);
+ dev_info(dev, "\tflags_extr_imm = %d\n", alu->flags_extr_imm);
+ dev_info(dev, "\tflags_start_imm= %d\n", alu->flags_start_imm);
+}
+
+/**
+ * ice_bst_tcam_dump - dump a boost tcam info
+ * @hw: pointer to the hardware structure
+ * @item: boost tcam to dump
+ */
+static void ice_bst_tcam_dump(struct ice_hw *hw, struct ice_bst_tcam_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "addr = %d\n", item->addr);
+
+ dev_info(dev, "key : ");
+ for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++)
+ dev_info(dev, "%02x ", item->key[i]);
+
+ dev_info(dev, "\n");
+
+ dev_info(dev, "key_inv: ");
+ for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++)
+ dev_info(dev, "%02x ", item->key_inv[i]);
+
+ dev_info(dev, "\n");
+
+ dev_info(dev, "hit_idx_grp = %d\n", item->hit_idx_grp);
+ dev_info(dev, "pg_prio = %d\n", item->pg_prio);
+
+ ice_bst_np_kb_dump(hw, &item->np_kb);
+ ice_bst_pg_kb_dump(hw, &item->pg_kb);
+
+ ice_bst_alu_dump(hw, &item->alu0, ICE_ALU0_IDX);
+ ice_bst_alu_dump(hw, &item->alu1, ICE_ALU1_IDX);
+ ice_bst_alu_dump(hw, &item->alu2, ICE_ALU2_IDX);
+}
+
+static void ice_lbl_dump(struct ice_hw *hw, struct ice_lbl_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "index = %u\n", item->idx);
+ dev_info(dev, "type = %u\n", item->type);
+ dev_info(dev, "label = %s\n", item->label);
+}
+
+#define ICE_BST_ALU_OPC GENMASK_ULL(5, 0)
+#define ICE_BST_ALU_SS GENMASK_ULL(13, 6)
+#define ICE_BST_ALU_SL GENMASK_ULL(18, 14)
+#define ICE_BST_ALU_SXS BIT_ULL(19)
+#define ICE_BST_ALU_SXK GENMASK_ULL(23, 20)
+#define ICE_BST_ALU_SRID GENMASK_ULL(30, 24)
+#define ICE_BST_ALU_DRID GENMASK_ULL(37, 31)
+#define ICE_BST_ALU_INC0 BIT_ULL(38)
+#define ICE_BST_ALU_INC1 BIT_ULL(39)
+#define ICE_BST_ALU_POO GENMASK_ULL(41, 40)
+#define ICE_BST_ALU_PO GENMASK_ULL(49, 42)
+#define ICE_BST_ALU_BA_S 50 /* offset for the 2nd 64-bit field */
+#define ICE_BST_ALU_BA GENMASK_ULL(57 - ICE_BST_ALU_BA_S, \
+ 50 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_IMM GENMASK_ULL(73 - ICE_BST_ALU_BA_S, \
+ 58 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_DFE BIT_ULL(74 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_DS GENMASK_ULL(80 - ICE_BST_ALU_BA_S, \
+ 75 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_DL GENMASK_ULL(86 - ICE_BST_ALU_BA_S, \
+ 81 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_FEI BIT_ULL(87 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_FSI GENMASK_ULL(95 - ICE_BST_ALU_BA_S, \
+ 88 - ICE_BST_ALU_BA_S)
+
+/**
+ * ice_bst_alu_init - parse 96 bits of ALU entry
+ * @alu: pointer to the ALU entry structure
+ * @data: ALU entry data to be parsed
+ * @off: offset of the ALU entry data
+ */
+static void ice_bst_alu_init(struct ice_alu *alu, u8 *data, u8 off)
+{
+ u64 d64;
+ u8 idd;
+
+ d64 = *((u64 *)data) >> off;
+
+ alu->opc = FIELD_GET(ICE_BST_ALU_OPC, d64);
+ alu->src_start = FIELD_GET(ICE_BST_ALU_SS, d64);
+ alu->src_len = FIELD_GET(ICE_BST_ALU_SL, d64);
+ alu->shift_xlate_sel = FIELD_GET(ICE_BST_ALU_SXS, d64);
+ alu->shift_xlate_key = FIELD_GET(ICE_BST_ALU_SXK, d64);
+ alu->src_reg_id = FIELD_GET(ICE_BST_ALU_SRID, d64);
+ alu->dst_reg_id = FIELD_GET(ICE_BST_ALU_DRID, d64);
+ alu->inc0 = FIELD_GET(ICE_BST_ALU_INC0, d64);
+ alu->inc1 = FIELD_GET(ICE_BST_ALU_INC1, d64);
+ alu->proto_offset_opc = FIELD_GET(ICE_BST_ALU_POO, d64);
+ alu->proto_offset = FIELD_GET(ICE_BST_ALU_PO, d64);
+
+ idd = (ICE_BST_ALU_BA_S + off) / BITS_PER_BYTE;
+ off = (ICE_BST_ALU_BA_S + off) % BITS_PER_BYTE;
+ d64 = *((u64 *)(&data[idd])) >> off;
+
+ alu->branch_addr = FIELD_GET(ICE_BST_ALU_BA, d64);
+ alu->imm = FIELD_GET(ICE_BST_ALU_IMM, d64);
+ alu->dedicate_flags_ena = FIELD_GET(ICE_BST_ALU_DFE, d64);
+ alu->dst_start = FIELD_GET(ICE_BST_ALU_DS, d64);
+ alu->dst_len = FIELD_GET(ICE_BST_ALU_DL, d64);
+ alu->flags_extr_imm = FIELD_GET(ICE_BST_ALU_FEI, d64);
+ alu->flags_start_imm = FIELD_GET(ICE_BST_ALU_FSI, d64);
+}
+
+#define ICE_BST_PGKB_F0_ENA BIT_ULL(0)
+#define ICE_BST_PGKB_F0_IDX GENMASK_ULL(6, 1)
+#define ICE_BST_PGKB_F1_ENA BIT_ULL(7)
+#define ICE_BST_PGKB_F1_IDX GENMASK_ULL(13, 8)
+#define ICE_BST_PGKB_F2_ENA BIT_ULL(14)
+#define ICE_BST_PGKB_F2_IDX GENMASK_ULL(20, 15)
+#define ICE_BST_PGKB_F3_ENA BIT_ULL(21)
+#define ICE_BST_PGKB_F3_IDX GENMASK_ULL(27, 22)
+#define ICE_BST_PGKB_AR_IDX GENMASK_ULL(34, 28)
+
+/**
+ * ice_bst_pgkb_init - parse 35 bits of Parse Graph Key Build
+ * @kb: pointer to the Parse Graph Key Build structure
+ * @data: Parse Graph Key Build data to be parsed
+ */
+static void ice_bst_pgkb_init(struct ice_pg_keybuilder *kb, u64 data)
+{
+ kb->flag0_ena = FIELD_GET(ICE_BST_PGKB_F0_ENA, data);
+ kb->flag0_idx = FIELD_GET(ICE_BST_PGKB_F0_IDX, data);
+ kb->flag1_ena = FIELD_GET(ICE_BST_PGKB_F1_ENA, data);
+ kb->flag1_idx = FIELD_GET(ICE_BST_PGKB_F1_IDX, data);
+ kb->flag2_ena = FIELD_GET(ICE_BST_PGKB_F2_ENA, data);
+ kb->flag2_idx = FIELD_GET(ICE_BST_PGKB_F2_IDX, data);
+ kb->flag3_ena = FIELD_GET(ICE_BST_PGKB_F3_ENA, data);
+ kb->flag3_idx = FIELD_GET(ICE_BST_PGKB_F3_IDX, data);
+ kb->alu_reg_idx = FIELD_GET(ICE_BST_PGKB_AR_IDX, data);
+}
+
+#define ICE_BST_NPKB_OPC GENMASK(1, 0)
+#define ICE_BST_NPKB_S_R0 GENMASK(9, 2)
+#define ICE_BST_NPKB_L_R1 GENMASK(17, 10)
+
+/**
+ * ice_bst_npkb_init - parse 18 bits of Next Protocol Key Build
+ * @kb: pointer to the Next Protocol Key Build structure
+ * @data: Next Protocol Key Build data to be parsed
+ */
+static void ice_bst_npkb_init(struct ice_np_keybuilder *kb, u32 data)
+{
+ kb->opc = FIELD_GET(ICE_BST_NPKB_OPC, data);
+ kb->start_reg0 = FIELD_GET(ICE_BST_NPKB_S_R0, data);
+ kb->len_reg1 = FIELD_GET(ICE_BST_NPKB_L_R1, data);
+}
+
+#define ICE_BT_KEY_S 32
+#define ICE_BT_KEY_IDD (ICE_BT_KEY_S / BITS_PER_BYTE)
+#define ICE_BT_KIV_S 192
+#define ICE_BT_KIV_IDD (ICE_BT_KIV_S / BITS_PER_BYTE)
+#define ICE_BT_HIG_S 352
+#define ICE_BT_HIG_IDD (ICE_BT_HIG_S / BITS_PER_BYTE)
+#define ICE_BT_PGP_S 360
+#define ICE_BT_PGP_IDD (ICE_BT_PGP_S / BITS_PER_BYTE)
+#define ICE_BT_PGP_M GENMASK(361 - ICE_BT_PGP_S, 360 - ICE_BT_PGP_S)
+#define ICE_BT_NPKB_S 362
+#define ICE_BT_NPKB_IDD (ICE_BT_NPKB_S / BITS_PER_BYTE)
+#define ICE_BT_NPKB_OFF (ICE_BT_NPKB_S % BITS_PER_BYTE)
+#define ICE_BT_PGKB_S 380
+#define ICE_BT_PGKB_IDD (ICE_BT_PGKB_S / BITS_PER_BYTE)
+#define ICE_BT_PGKB_OFF (ICE_BT_PGKB_S % BITS_PER_BYTE)
+#define ICE_BT_ALU0_S 415
+#define ICE_BT_ALU0_IDD (ICE_BT_ALU0_S / BITS_PER_BYTE)
+#define ICE_BT_ALU0_OFF (ICE_BT_ALU0_S % BITS_PER_BYTE)
+#define ICE_BT_ALU1_S 511
+#define ICE_BT_ALU1_IDD (ICE_BT_ALU1_S / BITS_PER_BYTE)
+#define ICE_BT_ALU1_OFF (ICE_BT_ALU1_S % BITS_PER_BYTE)
+#define ICE_BT_ALU2_S 607
+#define ICE_BT_ALU2_IDD (ICE_BT_ALU2_S / BITS_PER_BYTE)
+#define ICE_BT_ALU2_OFF (ICE_BT_ALU2_S % BITS_PER_BYTE)
+
+/**
+ * ice_bst_parse_item - parse 704 bits of Boost TCAM entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Boost TCAM entry
+ * @item: item of Boost TCAM entry
+ * @data: Boost TCAM entry data to be parsed
+ * @size: size of Boost TCAM entry
+ */
+static void ice_bst_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_bst_tcam_item *ti = item;
+ u8 *buf = (u8 *)data;
+ int i;
+
+ ti->addr = *(u16 *)buf;
+
+ for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++) {
+ ti->key[i] = buf[ICE_BT_KEY_IDD + i];
+ ti->key_inv[i] = buf[ICE_BT_KIV_IDD + i];
+ }
+ ti->hit_idx_grp = buf[ICE_BT_HIG_IDD];
+ ti->pg_prio = buf[ICE_BT_PGP_IDD] & ICE_BT_PGP_M;
+
+ ice_bst_npkb_init(&ti->np_kb,
+ *((u32 *)(&buf[ICE_BT_NPKB_IDD])) >>
+ ICE_BT_NPKB_OFF);
+ ice_bst_pgkb_init(&ti->pg_kb,
+ *((u64 *)(&buf[ICE_BT_PGKB_IDD])) >>
+ ICE_BT_PGKB_OFF);
+
+ ice_bst_alu_init(&ti->alu0, &buf[ICE_BT_ALU0_IDD], ICE_BT_ALU0_OFF);
+ ice_bst_alu_init(&ti->alu1, &buf[ICE_BT_ALU1_IDD], ICE_BT_ALU1_OFF);
+ ice_bst_alu_init(&ti->alu2, &buf[ICE_BT_ALU2_IDD], ICE_BT_ALU2_OFF);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_bst_tcam_dump(hw, ti);
+}
+
+/**
+ * ice_bst_tcam_table_get - create a boost tcam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Boost TCAM table.
+ */
+static struct ice_bst_tcam_item *ice_bst_tcam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_BOOST_TCAM,
+ sizeof(struct ice_bst_tcam_item),
+ ICE_BST_TCAM_TABLE_SIZE,
+ ice_bst_parse_item, true);
+}
+
+static void ice_parse_lbl_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_lbl_item *lbl_item = item;
+ struct ice_lbl_item *lbl_data = data;
+
+ lbl_item->idx = lbl_data->idx;
+ memcpy(lbl_item->label, lbl_data->label, sizeof(lbl_item->label));
+
+ if (strstarts(lbl_item->label, ICE_LBL_BST_DVM))
+ lbl_item->type = ICE_LBL_BST_TYPE_DVM;
+ else if (strstarts(lbl_item->label, ICE_LBL_BST_SVM))
+ lbl_item->type = ICE_LBL_BST_TYPE_SVM;
+ else if (strstarts(lbl_item->label, ICE_LBL_TNL_VXLAN))
+ lbl_item->type = ICE_LBL_BST_TYPE_VXLAN;
+ else if (strstarts(lbl_item->label, ICE_LBL_TNL_GENEVE))
+ lbl_item->type = ICE_LBL_BST_TYPE_GENEVE;
+ else if (strstarts(lbl_item->label, ICE_LBL_TNL_UDP_ECPRI))
+ lbl_item->type = ICE_LBL_BST_TYPE_UDP_ECPRI;
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_lbl_dump(hw, lbl_item);
+}
+
+/**
+ * ice_bst_lbl_table_get - create a boost label table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Boost label table.
+ */
+static struct ice_lbl_item *ice_bst_lbl_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_LBL_RXPARSER_TMEM,
+ sizeof(struct ice_lbl_item),
+ ICE_BST_TCAM_TABLE_SIZE,
+ ice_parse_lbl_item, true);
+}
+
+/**
+ * ice_bst_tcam_match - match a pattern on the boost tcam table
+ * @tcam_table: boost tcam table to search
+ * @pat: pattern to match
+ *
+ * Return: a pointer to the matching Boost TCAM item or NULL.
+ */
+struct ice_bst_tcam_item *
+ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat)
+{
+ int i;
+
+ for (i = 0; i < ICE_BST_TCAM_TABLE_SIZE; i++) {
+ struct ice_bst_tcam_item *item = &tcam_table[i];
+
+ if (item->hit_idx_grp == 0)
+ continue;
+ if (ice_ternary_match(item->key, item->key_inv, pat,
+ ICE_BST_TCAM_KEY_SIZE))
+ return item;
+ }
+
+ return NULL;
+}
+
+/*** ICE_SID_RXPARSER_MARKER_PTYPE section ***/
+/**
+ * ice_ptype_mk_tcam_dump - dump a ptype marker tcam info
+ * @hw: pointer to the hardware structure
+ * @item: ptype marker tcam to dump
+ */
+static void ice_ptype_mk_tcam_dump(struct ice_hw *hw,
+ struct ice_ptype_mk_tcam_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "address = %d\n", item->address);
+ dev_info(dev, "ptype = %d\n", item->ptype);
+
+ dev_info(dev, "key :");
+ for (i = 0; i < ICE_PTYPE_MK_TCAM_KEY_SIZE; i++)
+ dev_info(dev, "%02x ", item->key[i]);
+
+ dev_info(dev, "\n");
+
+ dev_info(dev, "key_inv:");
+ for (i = 0; i < ICE_PTYPE_MK_TCAM_KEY_SIZE; i++)
+ dev_info(dev, "%02x ", item->key_inv[i]);
+
+ dev_info(dev, "\n");
+}
+
+static void ice_parse_ptype_mk_tcam_item(struct ice_hw *hw, u16 idx,
+ void *item, void *data, int size)
+{
+ memcpy(item, data, size);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_ptype_mk_tcam_dump(hw,
+ (struct ice_ptype_mk_tcam_item *)item);
+}
+
+/**
+ * ice_ptype_mk_tcam_table_get - create a ptype marker tcam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Marker PType TCAM table.
+ */
+static
+struct ice_ptype_mk_tcam_item *ice_ptype_mk_tcam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_PTYPE,
+ sizeof(struct ice_ptype_mk_tcam_item),
+ ICE_PTYPE_MK_TCAM_TABLE_SIZE,
+ ice_parse_ptype_mk_tcam_item, true);
+}
+
+/**
+ * ice_ptype_mk_tcam_match - match a pattern on a ptype marker tcam table
+ * @table: ptype marker tcam table to search
+ * @pat: pattern to match
+ * @len: length of the pattern
+ *
+ * Return: a pointer to the matching Marker PType item or NULL.
+ */
+struct ice_ptype_mk_tcam_item *
+ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table,
+ u8 *pat, int len)
+{
+ int i;
+
+ for (i = 0; i < ICE_PTYPE_MK_TCAM_TABLE_SIZE; i++) {
+ struct ice_ptype_mk_tcam_item *item = &table[i];
+
+ if (ice_ternary_match(item->key, item->key_inv, pat, len))
+ return item;
+ }
+
+ return NULL;
+}
+
+/*** ICE_SID_RXPARSER_MARKER_GRP section ***/
+/**
+ * ice_mk_grp_dump - dump a marker group item info
+ * @hw: pointer to the hardware structure
+ * @item: marker group item to dump
+ */
+static void ice_mk_grp_dump(struct ice_hw *hw, struct ice_mk_grp_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "index = %d\n", item->idx);
+
+ dev_info(dev, "markers: ");
+ for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++)
+ dev_info(dev, "%d ", item->markers[i]);
+
+ dev_info(dev, "\n");
+}
+
+static void ice_mk_grp_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_mk_grp_item *grp = item;
+ u8 *buf = data;
+ int i;
+
+ grp->idx = idx;
+
+ for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++)
+ grp->markers[i] = buf[i];
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_mk_grp_dump(hw, grp);
+}
+
+/**
+ * ice_mk_grp_table_get - create a marker group table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Marker Group ID table.
+ */
+static struct ice_mk_grp_item *ice_mk_grp_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_GRP,
+ sizeof(struct ice_mk_grp_item),
+ ICE_MK_GRP_TABLE_SIZE,
+ ice_mk_grp_parse_item, false);
+}
+
+/*** ICE_SID_RXPARSER_PROTO_GRP section ***/
+static void ice_proto_off_dump(struct ice_hw *hw,
+ struct ice_proto_off *po, int idx)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "proto %d\n", idx);
+ dev_info(dev, "\tpolarity = %d\n", po->polarity);
+ dev_info(dev, "\tproto_id = %d\n", po->proto_id);
+ dev_info(dev, "\toffset = %d\n", po->offset);
+}
+
+/**
+ * ice_proto_grp_dump - dump a proto group item info
+ * @hw: pointer to the hardware structure
+ * @item: proto group item to dump
+ */
+static void ice_proto_grp_dump(struct ice_hw *hw,
+ struct ice_proto_grp_item *item)
+{
+ int i;
+
+ dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
+
+ for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++)
+ ice_proto_off_dump(hw, &item->po[i], i);
+}
+
+#define ICE_PO_POL BIT(0)
+#define ICE_PO_PID GENMASK(8, 1)
+#define ICE_PO_OFF GENMASK(21, 12)
+
+/**
+ * ice_proto_off_parse - parse 22 bits of Protocol entry
+ * @po: pointer to the Protocol entry structure
+ * @data: Protocol entry data to be parsed
+ */
+static void ice_proto_off_parse(struct ice_proto_off *po, u32 data)
+{
+ po->polarity = FIELD_GET(ICE_PO_POL, data);
+ po->proto_id = FIELD_GET(ICE_PO_PID, data);
+ po->offset = FIELD_GET(ICE_PO_OFF, data);
+}
+
+/**
+ * ice_proto_grp_parse_item - parse 192 bits of Protocol Group Table entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Protocol Group Table entry
+ * @item: item of Protocol Group Table entry
+ * @data: Protocol Group Table entry data to be parsed
+ * @size: size of Protocol Group Table entry
+ */
+static void ice_proto_grp_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_proto_grp_item *grp = item;
+ u8 *buf = (u8 *)data;
+ u8 idd, off;
+ u32 d32;
+ int i;
+
+ grp->idx = idx;
+
+ for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) {
+ idd = (ICE_PROTO_GRP_ITEM_SIZE * i) / BITS_PER_BYTE;
+ off = (ICE_PROTO_GRP_ITEM_SIZE * i) % BITS_PER_BYTE;
+ d32 = *((u32 *)&buf[idd]) >> off;
+ ice_proto_off_parse(&grp->po[i], d32);
+ }
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_proto_grp_dump(hw, grp);
+}
+
+/**
+ * ice_proto_grp_table_get - create a proto group table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Protocol Group table.
+ */
+static struct ice_proto_grp_item *ice_proto_grp_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_PROTO_GRP,
+ sizeof(struct ice_proto_grp_item),
+ ICE_PROTO_GRP_TABLE_SIZE,
+ ice_proto_grp_parse_item, false);
+}
+
+/*** ICE_SID_RXPARSER_FLAG_REDIR section ***/
+/**
+ * ice_flg_rd_dump - dump a flag redirect item info
+ * @hw: pointer to the hardware structure
+ * @item: flag redirect item to dump
+ */
+static void ice_flg_rd_dump(struct ice_hw *hw, struct ice_flg_rd_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "index = %d\n", item->idx);
+ dev_info(dev, "expose = %d\n", item->expose);
+ dev_info(dev, "intr_flg_id = %d\n", item->intr_flg_id);
+}
+
+#define ICE_FRT_EXPO BIT(0)
+#define ICE_FRT_IFID GENMASK(6, 1)
+
+/**
+ * ice_flg_rd_parse_item - parse 8 bits of Flag Redirect Table entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Flag Redirect Table entry
+ * @item: item of Flag Redirect Table entry
+ * @data: Flag Redirect Table entry data to be parsed
+ * @size: size of Flag Redirect Table entry
+ */
+static void ice_flg_rd_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_flg_rd_item *rdi = item;
+ u8 d8 = *(u8 *)data;
+
+ rdi->idx = idx;
+ rdi->expose = FIELD_GET(ICE_FRT_EXPO, d8);
+ rdi->intr_flg_id = FIELD_GET(ICE_FRT_IFID, d8);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_flg_rd_dump(hw, rdi);
+}
+
+/**
+ * ice_flg_rd_table_get - create a flag redirect table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Flags Redirection table.
+ */
+static struct ice_flg_rd_item *ice_flg_rd_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_FLAG_REDIR,
+ sizeof(struct ice_flg_rd_item),
+ ICE_FLG_RD_TABLE_SIZE,
+ ice_flg_rd_parse_item, false);
+}
+
+/**
+ * ice_flg_redirect - redirect parser flags to packet flags
+ * @table: flag redirect table
+ * @psr_flg: parser flags to redirect
+ *
+ * Return: the redirected packet flags, or 0 if @psr_flg is 0.
+ */
+u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg)
+{
+ u64 flg = 0;
+ int i;
+
+ for (i = 0; i < ICE_FLG_RDT_SIZE; i++) {
+ struct ice_flg_rd_item *item = &table[i];
+
+ if (!item->expose)
+ continue;
+
+ if (psr_flg & BIT(item->intr_flg_id))
+ flg |= BIT(i);
+ }
+
+ return flg;
+}
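+
+/* Example: if table[3].expose is set and table[3].intr_flg_id is 10, an
+ * input with parser flag bit 10 set yields bit 3 in the returned packet
+ * flag word.
+ */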
+
+/*** ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT_KEY_BUILDER_ACL,
+ * ICE_SID_XLT_KEY_BUILDER_FD and ICE_SID_XLT_KEY_BUILDER_RSS
+ * sections ***/
+static void ice_xlt_kb_entry_dump(struct ice_hw *hw,
+ struct ice_xlt_kb_entry *entry, int idx)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "key builder entry %d\n", idx);
+ dev_info(dev, "\txlt1_ad_sel = %d\n", entry->xlt1_ad_sel);
+ dev_info(dev, "\txlt2_ad_sel = %d\n", entry->xlt2_ad_sel);
+
+ for (i = 0; i < ICE_XLT_KB_FLAG0_14_CNT; i++)
+ dev_info(dev, "\tflg%d_sel = %d\n", i, entry->flg0_14_sel[i]);
+
+ dev_info(dev, "\txlt1_md_sel = %d\n", entry->xlt1_md_sel);
+ dev_info(dev, "\txlt2_md_sel = %d\n", entry->xlt2_md_sel);
+}
+
+/**
+ * ice_xlt_kb_dump - dump an xlt key build info
+ * @hw: pointer to the hardware structure
+ * @kb: key build to dump
+ */
+static void ice_xlt_kb_dump(struct ice_hw *hw, struct ice_xlt_kb *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "xlt1_pm = %d\n", kb->xlt1_pm);
+ dev_info(dev, "xlt2_pm = %d\n", kb->xlt2_pm);
+ dev_info(dev, "prof_id_pm = %d\n", kb->prof_id_pm);
+ dev_info(dev, "flag15 lo = 0x%08x\n", (u32)kb->flag15);
+ dev_info(dev, "flag15 hi = 0x%08x\n",
+ (u32)(kb->flag15 >> (sizeof(u32) * BITS_PER_BYTE)));
+
+ for (i = 0; i < ICE_XLT_KB_TBL_CNT; i++)
+ ice_xlt_kb_entry_dump(hw, &kb->entries[i], i);
+}
+
+#define ICE_XLT_KB_X1AS_S 32 /* offset for the 1st 64-bit field */
+#define ICE_XLT_KB_X1AS_IDD (ICE_XLT_KB_X1AS_S / BITS_PER_BYTE)
+#define ICE_XLT_KB_X1AS_OFF (ICE_XLT_KB_X1AS_S % BITS_PER_BYTE)
+#define ICE_XLT_KB_X1AS GENMASK_ULL(34 - ICE_XLT_KB_X1AS_S, \
+ 32 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_X2AS GENMASK_ULL(37 - ICE_XLT_KB_X1AS_S, \
+ 35 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL00 GENMASK_ULL(46 - ICE_XLT_KB_X1AS_S, \
+ 38 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL01 GENMASK_ULL(55 - ICE_XLT_KB_X1AS_S, \
+ 47 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL02 GENMASK_ULL(64 - ICE_XLT_KB_X1AS_S, \
+ 56 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL03 GENMASK_ULL(73 - ICE_XLT_KB_X1AS_S, \
+ 65 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL04 GENMASK_ULL(82 - ICE_XLT_KB_X1AS_S, \
+ 74 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL05 GENMASK_ULL(91 - ICE_XLT_KB_X1AS_S, \
+ 83 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL06_S 92 /* offset for the 2nd 64-bit field */
+#define ICE_XLT_KB_FL06_IDD (ICE_XLT_KB_FL06_S / BITS_PER_BYTE)
+#define ICE_XLT_KB_FL06_OFF (ICE_XLT_KB_FL06_S % BITS_PER_BYTE)
+#define ICE_XLT_KB_FL06 GENMASK_ULL(100 - ICE_XLT_KB_FL06_S, \
+ 92 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL07 GENMASK_ULL(109 - ICE_XLT_KB_FL06_S, \
+ 101 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL08 GENMASK_ULL(118 - ICE_XLT_KB_FL06_S, \
+ 110 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL09 GENMASK_ULL(127 - ICE_XLT_KB_FL06_S, \
+ 119 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL10 GENMASK_ULL(136 - ICE_XLT_KB_FL06_S, \
+ 128 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL11 GENMASK_ULL(145 - ICE_XLT_KB_FL06_S, \
+ 137 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL12_S 146 /* offset for the 3rd 64-bit field */
+#define ICE_XLT_KB_FL12_IDD (ICE_XLT_KB_FL12_S / BITS_PER_BYTE)
+#define ICE_XLT_KB_FL12_OFF (ICE_XLT_KB_FL12_S % BITS_PER_BYTE)
+#define ICE_XLT_KB_FL12 GENMASK_ULL(154 - ICE_XLT_KB_FL12_S, \
+ 146 - ICE_XLT_KB_FL12_S)
+#define ICE_XLT_KB_FL13 GENMASK_ULL(163 - ICE_XLT_KB_FL12_S, \
+ 155 - ICE_XLT_KB_FL12_S)
+#define ICE_XLT_KB_FL14 GENMASK_ULL(181 - ICE_XLT_KB_FL12_S, \
+ 164 - ICE_XLT_KB_FL12_S)
+#define ICE_XLT_KB_X1MS GENMASK_ULL(186 - ICE_XLT_KB_FL12_S, \
+ 182 - ICE_XLT_KB_FL12_S)
+#define ICE_XLT_KB_X2MS GENMASK_ULL(191 - ICE_XLT_KB_FL12_S, \
+ 187 - ICE_XLT_KB_FL12_S)
+
+/**
+ * ice_kb_entry_init - parse 192 bits of XLT Key Builder entry
+ * @entry: pointer to the XLT Key Builder entry structure
+ * @data: XLT Key Builder entry data to be parsed
+ */
+static void ice_kb_entry_init(struct ice_xlt_kb_entry *entry, u8 *data)
+{
+ u8 i = 0;
+ u64 d64;
+
+ d64 = *((u64 *)&data[ICE_XLT_KB_X1AS_IDD]) >> ICE_XLT_KB_X1AS_OFF;
+
+ entry->xlt1_ad_sel = FIELD_GET(ICE_XLT_KB_X1AS, d64);
+ entry->xlt2_ad_sel = FIELD_GET(ICE_XLT_KB_X2AS, d64);
+
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL00, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL01, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL02, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL03, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL04, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL05, d64);
+
+ d64 = *((u64 *)&data[ICE_XLT_KB_FL06_IDD]) >> ICE_XLT_KB_FL06_OFF;
+
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL06, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL07, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL08, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL09, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL10, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL11, d64);
+
+ d64 = *((u64 *)&data[ICE_XLT_KB_FL12_IDD]) >> ICE_XLT_KB_FL12_OFF;
+
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL12, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL13, d64);
+ entry->flg0_14_sel[i] = FIELD_GET(ICE_XLT_KB_FL14, d64);
+
+ entry->xlt1_md_sel = FIELD_GET(ICE_XLT_KB_X1MS, d64);
+ entry->xlt2_md_sel = FIELD_GET(ICE_XLT_KB_X2MS, d64);
+}
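+
+/* The three 64-bit reads above rebase at bits 32, 92 and 146 of the
+ * entry, i.e. bytes 4, 11 and 18 with residual shifts of 0, 4 and 2.
+ */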
+
+#define ICE_XLT_KB_X1PM_OFF 0
+#define ICE_XLT_KB_X2PM_OFF 1
+#define ICE_XLT_KB_PIPM_OFF 2
+#define ICE_XLT_KB_FL15_OFF 4
+#define ICE_XLT_KB_TBL_OFF 12
+
+/**
+ * ice_parse_kb_data - parse 204 bits of XLT Key Build Table
+ * @hw: pointer to the hardware structure
+ * @kb: pointer to the XLT Key Build Table structure
+ * @data: XLT Key Build Table data to be parsed
+ */
+static void ice_parse_kb_data(struct ice_hw *hw, struct ice_xlt_kb *kb,
+ void *data)
+{
+ u8 *buf = data;
+ int i;
+
+ kb->xlt1_pm = buf[ICE_XLT_KB_X1PM_OFF];
+ kb->xlt2_pm = buf[ICE_XLT_KB_X2PM_OFF];
+ kb->prof_id_pm = buf[ICE_XLT_KB_PIPM_OFF];
+
+ kb->flag15 = *(u64 *)&buf[ICE_XLT_KB_FL15_OFF];
+ for (i = 0; i < ICE_XLT_KB_TBL_CNT; i++)
+ ice_kb_entry_init(&kb->entries[i],
+ &buf[ICE_XLT_KB_TBL_OFF +
+ i * ICE_XLT_KB_TBL_ENTRY_SIZE]);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_xlt_kb_dump(hw, kb);
+}
+
+static struct ice_xlt_kb *ice_xlt_kb_get(struct ice_hw *hw, u32 sect_type)
+{
+ struct ice_pkg_enum state = {};
+ struct ice_seg *seg = hw->seg;
+ struct ice_xlt_kb *kb;
+ void *data;
+
+ if (!seg)
+ return ERR_PTR(-EINVAL);
+
+ kb = kzalloc(sizeof(*kb), GFP_KERNEL);
+ if (!kb)
+ return ERR_PTR(-ENOMEM);
+
+ data = ice_pkg_enum_section(seg, &state, sect_type);
+ if (!data) {
+ ice_debug(hw, ICE_DBG_PARSER, "failed to find section type %d.\n",
+ sect_type);
+ kfree(kb);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ice_parse_kb_data(hw, kb, data);
+
+ return kb;
+}
+
+/**
+ * ice_xlt_kb_get_sw - create switch xlt key build
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Key Builder table for Switch.
+ */
+static struct ice_xlt_kb *ice_xlt_kb_get_sw(struct ice_hw *hw)
+{
+ return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_SW);
+}
+
+/**
+ * ice_xlt_kb_get_acl - create acl xlt key build
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Key Builder table for ACL.
+ */
+static struct ice_xlt_kb *ice_xlt_kb_get_acl(struct ice_hw *hw)
+{
+ return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_ACL);
+}
+
+/**
+ * ice_xlt_kb_get_fd - create fdir xlt key build
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Key Builder table for Flow Director.
+ */
+static struct ice_xlt_kb *ice_xlt_kb_get_fd(struct ice_hw *hw)
+{
+ return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_FD);
+}
+
+/**
+ * ice_xlt_kb_get_rss - create rss xlt key build
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Key Builder table for RSS.
+ */
+static struct ice_xlt_kb *ice_xlt_kb_get_rss(struct ice_hw *hw)
+{
+ return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_RSS);
+}
+
+#define ICE_XLT_KB_MASK GENMASK_ULL(5, 0)
+
+/**
+ * ice_xlt_kb_flag_get - aggregate a 64-bit packet flag word into a 16-bit xlt flag
+ * @kb: xlt key build
+ * @pkt_flag: 64 bits packet flag
+ *
+ * Return: XLT flag or 0 if @pkt_flag = 0.
+ */
+u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag)
+{
+ struct ice_xlt_kb_entry *entry = &kb->entries[0];
+ u16 flag = 0;
+ int i;
+
+ /* check flag 15 */
+ if (kb->flag15 & pkt_flag)
+ flag = BIT(ICE_XLT_KB_FLAG0_14_CNT);
+
+ /* check flag 0 - 14 */
+ for (i = 0; i < ICE_XLT_KB_FLAG0_14_CNT; i++) {
+ /* only check first entry */
+ u16 idx = entry->flg0_14_sel[i] & ICE_XLT_KB_MASK;
+
+ if (pkt_flag & BIT(idx))
+ flag |= (u16)BIT(i);
+ }
+
+ return flag;
+}
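+
+/* Example: with entries[0].flg0_14_sel[2] selecting packet flag 7, an
+ * input with bit 7 set yields bit 2 in the returned XLT flag; any overlap
+ * between pkt_flag and kb->flag15 sets bit 15.
+ */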
+
+/*** Parser API ***/
+/**
+ * ice_parser_create - create a parser instance
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated parser instance or ERR_PTR
+ * in case of error.
+ */
+struct ice_parser *ice_parser_create(struct ice_hw *hw)
+{
+ struct ice_parser *p;
+ void *err;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ p->hw = hw;
+ p->rt.psr = p;
+
+ p->imem_table = ice_imem_table_get(hw);
+ if (IS_ERR(p->imem_table)) {
+ err = p->imem_table;
+ goto err;
+ }
+
+ p->mi_table = ice_metainit_table_get(hw);
+ if (IS_ERR(p->mi_table)) {
+ err = p->mi_table;
+ goto err;
+ }
+
+ p->pg_cam_table = ice_pg_cam_table_get(hw);
+ if (IS_ERR(p->pg_cam_table)) {
+ err = p->pg_cam_table;
+ goto err;
+ }
+
+ p->pg_sp_cam_table = ice_pg_sp_cam_table_get(hw);
+ if (IS_ERR(p->pg_sp_cam_table)) {
+ err = p->pg_sp_cam_table;
+ goto err;
+ }
+
+ p->pg_nm_cam_table = ice_pg_nm_cam_table_get(hw);
+ if (IS_ERR(p->pg_nm_cam_table)) {
+ err = p->pg_nm_cam_table;
+ goto err;
+ }
+
+ p->pg_nm_sp_cam_table = ice_pg_nm_sp_cam_table_get(hw);
+ if (IS_ERR(p->pg_nm_sp_cam_table)) {
+ err = p->pg_nm_sp_cam_table;
+ goto err;
+ }
+
+ p->bst_tcam_table = ice_bst_tcam_table_get(hw);
+ if (IS_ERR(p->bst_tcam_table)) {
+ err = p->bst_tcam_table;
+ goto err;
+ }
+
+ p->bst_lbl_table = ice_bst_lbl_table_get(hw);
+ if (IS_ERR(p->bst_lbl_table)) {
+ err = p->bst_lbl_table;
+ goto err;
+ }
+
+ p->ptype_mk_tcam_table = ice_ptype_mk_tcam_table_get(hw);
+ if (IS_ERR(p->ptype_mk_tcam_table)) {
+ err = p->ptype_mk_tcam_table;
+ goto err;
+ }
+
+ p->mk_grp_table = ice_mk_grp_table_get(hw);
+ if (IS_ERR(p->mk_grp_table)) {
+ err = p->mk_grp_table;
+ goto err;
+ }
+
+ p->proto_grp_table = ice_proto_grp_table_get(hw);
+ if (IS_ERR(p->proto_grp_table)) {
+ err = p->proto_grp_table;
+ goto err;
+ }
+
+ p->flg_rd_table = ice_flg_rd_table_get(hw);
+ if (IS_ERR(p->flg_rd_table)) {
+ err = p->flg_rd_table;
+ goto err;
+ }
+
+ p->xlt_kb_sw = ice_xlt_kb_get_sw(hw);
+ if (IS_ERR(p->xlt_kb_sw)) {
+ err = p->xlt_kb_sw;
+ goto err;
+ }
+
+ p->xlt_kb_acl = ice_xlt_kb_get_acl(hw);
+ if (IS_ERR(p->xlt_kb_acl)) {
+ err = p->xlt_kb_acl;
+ goto err;
+ }
+
+ p->xlt_kb_fd = ice_xlt_kb_get_fd(hw);
+ if (IS_ERR(p->xlt_kb_fd)) {
+ err = p->xlt_kb_fd;
+ goto err;
+ }
+
+ p->xlt_kb_rss = ice_xlt_kb_get_rss(hw);
+ if (IS_ERR(p->xlt_kb_rss)) {
+ err = p->xlt_kb_rss;
+ goto err;
+ }
+
+ return p;
+err:
+ ice_parser_destroy(p);
+ return err;
+}
+
+/**
+ * ice_parser_destroy - destroy a parser instance
+ * @psr: pointer to a parser instance
+ */
+void ice_parser_destroy(struct ice_parser *psr)
+{
+ kfree(psr->imem_table);
+ kfree(psr->mi_table);
+ kfree(psr->pg_cam_table);
+ kfree(psr->pg_sp_cam_table);
+ kfree(psr->pg_nm_cam_table);
+ kfree(psr->pg_nm_sp_cam_table);
+ kfree(psr->bst_tcam_table);
+ kfree(psr->bst_lbl_table);
+ kfree(psr->ptype_mk_tcam_table);
+ kfree(psr->mk_grp_table);
+ kfree(psr->proto_grp_table);
+ kfree(psr->flg_rd_table);
+ kfree(psr->xlt_kb_sw);
+ kfree(psr->xlt_kb_acl);
+ kfree(psr->xlt_kb_fd);
+ kfree(psr->xlt_kb_rss);
+
+ kfree(psr);
+}
+
+/**
+ * ice_parser_run - parse a packet in binary and return the result
+ * @psr: pointer to a parser instance
+ * @pkt_buf: packet data
+ * @pkt_len: packet length
+ * @rslt: input/output parameter to save parser result.
+ *
+ * Return: 0 on success or errno.
+ */
+int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf,
+ int pkt_len, struct ice_parser_result *rslt)
+{
+ ice_parser_rt_reset(&psr->rt);
+ ice_parser_rt_pktbuf_set(&psr->rt, pkt_buf, pkt_len);
+
+ return ice_parser_rt_execute(&psr->rt, rslt);
+}
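+
+/* Usage sketch (illustrative; error handling abbreviated, and "hw",
+ * "pkt" and "pkt_len" are hypothetical caller-owned values):
+ *
+ *	struct ice_parser_result rslt;
+ *	struct ice_parser *psr;
+ *	int err;
+ *
+ *	psr = ice_parser_create(hw);
+ *	if (IS_ERR(psr))
+ *		return PTR_ERR(psr);
+ *	err = ice_parser_run(psr, pkt, pkt_len, &rslt);
+ *	if (!err)
+ *		ice_parser_result_dump(hw, &rslt);
+ *	ice_parser_destroy(psr);
+ */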
+
+/**
+ * ice_parser_result_dump - dump a parser result info
+ * @hw: pointer to the hardware structure
+ * @rslt: parser result info to dump
+ */
+void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "ptype = %d\n", rslt->ptype);
+ for (i = 0; i < rslt->po_num; i++)
+ dev_info(dev, "proto = %d, offset = %d\n",
+ rslt->po[i].proto_id, rslt->po[i].offset);
+
+ dev_info(dev, "flags_psr = 0x%016llx\n", rslt->flags_psr);
+ dev_info(dev, "flags_pkt = 0x%016llx\n", rslt->flags_pkt);
+ dev_info(dev, "flags_sw = 0x%04x\n", rslt->flags_sw);
+ dev_info(dev, "flags_fd = 0x%04x\n", rslt->flags_fd);
+ dev_info(dev, "flags_rss = 0x%04x\n", rslt->flags_rss);
+}
+
+#define ICE_BT_VLD_KEY 0xFF
+#define ICE_BT_INV_KEY 0xFE
+
+static void ice_bst_dvm_set(struct ice_parser *psr, enum ice_lbl_type type,
+ bool on)
+{
+ u16 i = 0;
+
+ while (true) {
+ struct ice_bst_tcam_item *item;
+ u8 key;
+
+ item = ice_bst_tcam_search(psr->bst_tcam_table,
+ psr->bst_lbl_table,
+ type, &i);
+ if (!item)
+ break;
+
+ key = on ? ICE_BT_VLD_KEY : ICE_BT_INV_KEY;
+ item->key[ICE_BT_VM_OFF] = key;
+ item->key_inv[ICE_BT_VM_OFF] = key;
+ i++;
+ }
+}
+
+/**
+ * ice_parser_dvm_set - configure double vlan mode for parser
+ * @psr: pointer to a parser instance
+ * @on: true to turn on; false to turn off
+ */
+void ice_parser_dvm_set(struct ice_parser *psr, bool on)
+{
+ ice_bst_dvm_set(psr, ICE_LBL_BST_TYPE_DVM, on);
+ ice_bst_dvm_set(psr, ICE_LBL_BST_TYPE_SVM, !on);
+}
+
+static int ice_tunnel_port_set(struct ice_parser *psr, enum ice_lbl_type type,
+ u16 udp_port, bool on)
+{
+ u8 *buf = (u8 *)&udp_port;
+ u16 i = 0;
+
+ while (true) {
+ struct ice_bst_tcam_item *item;
+
+ item = ice_bst_tcam_search(psr->bst_tcam_table,
+ psr->bst_lbl_table,
+ type, &i);
+ if (!item)
+ break;
+
+ /* found an empty slot to add */
+ if (on && item->key[ICE_BT_TUN_PORT_OFF_H] == ICE_BT_INV_KEY &&
+ item->key_inv[ICE_BT_TUN_PORT_OFF_H] == ICE_BT_INV_KEY) {
+ item->key_inv[ICE_BT_TUN_PORT_OFF_L] =
+ buf[ICE_UDP_PORT_OFF_L];
+ item->key_inv[ICE_BT_TUN_PORT_OFF_H] =
+ buf[ICE_UDP_PORT_OFF_H];
+
+ item->key[ICE_BT_TUN_PORT_OFF_L] =
+ ICE_BT_VLD_KEY - buf[ICE_UDP_PORT_OFF_L];
+ item->key[ICE_BT_TUN_PORT_OFF_H] =
+ ICE_BT_VLD_KEY - buf[ICE_UDP_PORT_OFF_H];
+
+ return 0;
+ /* found a matching slot to delete */
+ } else if (!on &&
+ (item->key_inv[ICE_BT_TUN_PORT_OFF_L] ==
+ buf[ICE_UDP_PORT_OFF_L] ||
+ item->key_inv[ICE_BT_TUN_PORT_OFF_H] ==
+ buf[ICE_UDP_PORT_OFF_H])) {
+ item->key_inv[ICE_BT_TUN_PORT_OFF_L] = ICE_BT_VLD_KEY;
+ item->key_inv[ICE_BT_TUN_PORT_OFF_H] = ICE_BT_INV_KEY;
+
+ item->key[ICE_BT_TUN_PORT_OFF_L] = ICE_BT_VLD_KEY;
+ item->key[ICE_BT_TUN_PORT_OFF_H] = ICE_BT_INV_KEY;
+
+ return 0;
+ }
+ i++;
+ }
+
+ return -EINVAL;
+}
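+
+/* Key encoding sketch (illustrative): on a little-endian host, VXLAN
+ * port 4789 (0x12b5) yields buf[0] = 0xb5 and buf[1] = 0x12; key_inv
+ * stores the bytes directly while key stores ICE_BT_VLD_KEY minus each
+ * byte (0xff - 0xb5 = 0x4a, 0xff - 0x12 = 0xed), following the TCAM
+ * key/key-inverse convention searched by ice_bst_tcam_match().
+ */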
+
+/**
+ * ice_parser_vxlan_tunnel_set - configure vxlan tunnel for parser
+ * @psr: pointer to a parser instance
+ * @udp_port: vxlan tunnel port in UDP header
+ * @on: true to turn on; false to turn off
+ *
+ * Return: 0 on success or errno on failure.
+ */
+int ice_parser_vxlan_tunnel_set(struct ice_parser *psr,
+ u16 udp_port, bool on)
+{
+ return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_VXLAN, udp_port, on);
+}
+
+/**
+ * ice_parser_geneve_tunnel_set - configure geneve tunnel for parser
+ * @psr: pointer to a parser instance
+ * @udp_port: geneve tunnel port in UDP header
+ * @on: true to turn on; false to turn off
+ *
+ * Return: 0 on success or errno on failure.
+ */
+int ice_parser_geneve_tunnel_set(struct ice_parser *psr,
+ u16 udp_port, bool on)
+{
+ return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_GENEVE, udp_port, on);
+}
+
+/**
+ * ice_parser_ecpri_tunnel_set - configure ecpri tunnel for parser
+ * @psr: pointer to a parser instance
+ * @udp_port: ecpri tunnel port in UDP header
+ * @on: true to turn on; false to turn off
+ *
+ * Return: 0 on success or errno on failure.
+ */
+int ice_parser_ecpri_tunnel_set(struct ice_parser *psr,
+ u16 udp_port, bool on)
+{
+ return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_UDP_ECPRI,
+ udp_port, on);
+}
+
+/**
+ * ice_nearest_proto_id - find nearest protocol ID
+ * @rslt: pointer to a parser result instance
+ * @offset: an upper bound for the protocol offset
+ * @proto_id: the protocol ID (output)
+ * @proto_off: the protocol offset (output)
+ *
+ * From the protocols in @rslt, find the nearest protocol whose offset is
+ * not larger than @offset.
+ *
+ * Return: true if such a protocol lies at an even distance from @offset;
+ * its ID and the distance are stored in @proto_id and @proto_off
+ */
+static bool ice_nearest_proto_id(struct ice_parser_result *rslt, u16 offset,
+ u8 *proto_id, u16 *proto_off)
+{
+ u16 dist = U16_MAX;
+ u8 proto = 0;
+ int i;
+
+ for (i = 0; i < rslt->po_num; i++) {
+ if (offset < rslt->po[i].offset)
+ continue;
+ if (offset - rslt->po[i].offset < dist) {
+ proto = rslt->po[i].proto_id;
+ dist = offset - rslt->po[i].offset;
+ }
+ }
+
+ if (dist % 2)
+ return false;
+
+ *proto_id = proto;
+ *proto_off = dist;
+
+ return true;
+}
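+
+/* Worked example (hypothetical protocol IDs): with header starts at
+ * offsets {0, 14} and @offset = 20, the nearest start not beyond 20 is
+ * 14, so that protocol's ID is returned with @proto_off = 6; a distance
+ * of 7 (e.g. @offset = 21) would be rejected since field vectors are
+ * extracted as 16-bit words.
+ */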
+
+/* Default flag masks to cover GTP_EH_PDU, GTP_EH_PDU_LINK and TUN2.
+ * In the future, the flag masks should be learned from the DDP.
+ */
+#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW 0x4002
+#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL 0x0000
+#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD 0x6080
+#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS 0x6010
+
+/**
+ * ice_parser_profile_init - initialize an FXP profile based on parser result
+ * @rslt: an instance of a parser result
+ * @pkt_buf: packet data buffer
+ * @msk_buf: packet mask buffer
+ * @buf_len: packet buffer length
+ * @blk: FXP pipeline stage
+ * @prof: input/output parameter to save the profile
+ *
+ * Return: 0 on success or errno on failure.
+ */
+int ice_parser_profile_init(struct ice_parser_result *rslt,
+ const u8 *pkt_buf, const u8 *msk_buf,
+ int buf_len, enum ice_block blk,
+ struct ice_parser_profile *prof)
+{
+ u8 proto_id = U8_MAX;
+ u16 proto_off = 0;
+ u16 off;
+
+ memset(prof, 0, sizeof(*prof));
+ set_bit(rslt->ptype, prof->ptypes);
+ if (blk == ICE_BLK_SW) {
+ prof->flags = rslt->flags_sw;
+ prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW;
+ } else if (blk == ICE_BLK_ACL) {
+ prof->flags = rslt->flags_acl;
+ prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL;
+ } else if (blk == ICE_BLK_FD) {
+ prof->flags = rslt->flags_fd;
+ prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD;
+ } else if (blk == ICE_BLK_RSS) {
+ prof->flags = rslt->flags_rss;
+ prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS;
+ } else {
+ return -EINVAL;
+ }
+
+ for (off = 0; off < buf_len - 1; off++) {
+ if (msk_buf[off] == 0 && msk_buf[off + 1] == 0)
+ continue;
+ if (!ice_nearest_proto_id(rslt, off, &proto_id, &proto_off))
+ continue;
+ if (prof->fv_num >= ICE_PARSER_FV_MAX)
+ return -EINVAL;
+
+ prof->fv[prof->fv_num].proto_id = proto_id;
+ prof->fv[prof->fv_num].offset = proto_off;
+ prof->fv[prof->fv_num].spec = *(const u16 *)&pkt_buf[off];
+ prof->fv[prof->fv_num].msk = *(const u16 *)&msk_buf[off];
+ prof->fv_num++;
+ }
+
+ return 0;
+}
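+
+/* Usage sketch (illustrative; "psr", "pkt" and "msk" are hypothetical,
+ * with "pkt" and "msk" being buffers of the same length):
+ *
+ *	struct ice_parser_profile prof;
+ *	struct ice_parser_result rslt;
+ *	int err;
+ *
+ *	err = ice_parser_run(psr, pkt, len, &rslt);
+ *	if (!err)
+ *		err = ice_parser_profile_init(&rslt, pkt, msk, len,
+ *					      ICE_BLK_FD, &prof);
+ */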
+
+/**
+ * ice_parser_profile_dump - dump an FXP profile info
+ * @hw: pointer to the hardware structure
+ * @prof: profile info to dump
+ */
+void ice_parser_profile_dump(struct ice_hw *hw,
+ struct ice_parser_profile *prof)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ u16 i;
+
+ dev_info(dev, "ptypes:\n");
+ for (i = 0; i < ICE_FLOW_PTYPE_MAX; i++)
+ if (test_bit(i, prof->ptypes))
+ dev_info(dev, "\t%u\n", i);
+
+ for (i = 0; i < prof->fv_num; i++)
+ dev_info(dev, "proto = %u, offset = %2u, spec = 0x%04x, mask = 0x%04x\n",
+ prof->fv[i].proto_id, prof->fv[i].offset,
+ prof->fv[i].spec, prof->fv[i].msk);
+
+ dev_info(dev, "flags = 0x%04x\n", prof->flags);
+ dev_info(dev, "flags_msk = 0x%04x\n", prof->flags_msk);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_parser.h b/drivers/net/ethernet/intel/ice/ice_parser.h
new file mode 100644
index 000000000000..6509d807627c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_parser.h
@@ -0,0 +1,540 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef _ICE_PARSER_H_
+#define _ICE_PARSER_H_
+
+#define ICE_SEC_DATA_OFFSET 4
+#define ICE_SID_RXPARSER_IMEM_ENTRY_SIZE 48
+#define ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE 24
+#define ICE_SID_RXPARSER_CAM_ENTRY_SIZE 16
+#define ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE 17
+#define ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE 12
+#define ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE 13
+#define ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE 88
+#define ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE 24
+#define ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE 8
+#define ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE 24
+#define ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE 1
+
+#define ICE_SEC_LBL_DATA_OFFSET 2
+#define ICE_SID_LBL_ENTRY_SIZE 66
+
+/*** ICE_SID_RXPARSER_IMEM section ***/
+#define ICE_IMEM_TABLE_SIZE 192
+
+/* TCAM boost master control: if a bit is set and the TCAM hits, the TCAM
+ * output overrides the iMEM output.
+ */
+struct ice_bst_main {
+ bool alu0;
+ bool alu1;
+ bool alu2;
+ bool pg;
+};
+
+struct ice_bst_keybuilder {
+ u8 prio; /* 0-3: PG precedence within ALUs (3 highest) */
+ bool tsr_ctrl; /* TCAM Search Register control */
+};
+
+/* Next protocol Key builder */
+struct ice_np_keybuilder {
+ u8 opc;
+ u8 start_reg0;
+ u8 len_reg1;
+};
+
+enum ice_np_keybuilder_opcode {
+ ICE_NPKB_OPC_EXTRACT = 0,
+ ICE_NPKB_OPC_BUILD = 1,
+ ICE_NPKB_OPC_BYPASS = 2,
+};
+
+/* Parse Graph Key builder */
+struct ice_pg_keybuilder {
+ bool flag0_ena;
+ bool flag1_ena;
+ bool flag2_ena;
+ bool flag3_ena;
+ u8 flag0_idx;
+ u8 flag1_idx;
+ u8 flag2_idx;
+ u8 flag3_idx;
+ u8 alu_reg_idx;
+};
+
+enum ice_alu_idx {
+ ICE_ALU0_IDX = 0,
+ ICE_ALU1_IDX = 1,
+ ICE_ALU2_IDX = 2,
+};
+
+enum ice_alu_opcode {
+ ICE_ALU_PARK = 0,
+ ICE_ALU_MOV_ADD = 1,
+ ICE_ALU_ADD = 2,
+ ICE_ALU_MOV_AND = 4,
+ ICE_ALU_AND = 5,
+ ICE_ALU_AND_IMM = 6,
+ ICE_ALU_MOV_OR = 7,
+ ICE_ALU_OR = 8,
+ ICE_ALU_MOV_XOR = 9,
+ ICE_ALU_XOR = 10,
+ ICE_ALU_NOP = 11,
+ ICE_ALU_BR = 12,
+ ICE_ALU_BREQ = 13,
+ ICE_ALU_BRNEQ = 14,
+ ICE_ALU_BRGT = 15,
+ ICE_ALU_BRLT = 16,
+ ICE_ALU_BRGEQ = 17,
+ ICE_ALU_BRLEG = 18,
+ ICE_ALU_SETEQ = 19,
+ ICE_ALU_ANDEQ = 20,
+ ICE_ALU_OREQ = 21,
+ ICE_ALU_SETNEQ = 22,
+ ICE_ALU_ANDNEQ = 23,
+ ICE_ALU_ORNEQ = 24,
+ ICE_ALU_SETGT = 25,
+ ICE_ALU_ANDGT = 26,
+ ICE_ALU_ORGT = 27,
+ ICE_ALU_SETLT = 28,
+ ICE_ALU_ANDLT = 29,
+ ICE_ALU_ORLT = 30,
+ ICE_ALU_MOV_SUB = 31,
+ ICE_ALU_SUB = 32,
+ ICE_ALU_INVALID = 64,
+};
+
+enum ice_proto_off_opcode {
+ ICE_PO_OFF_REMAIN = 0,
+ ICE_PO_OFF_HDR_ADD = 1,
+ ICE_PO_OFF_HDR_SUB = 2,
+};
+
+struct ice_alu {
+ enum ice_alu_opcode opc;
+ u8 src_start;
+ u8 src_len;
+ bool shift_xlate_sel;
+ u8 shift_xlate_key;
+ u8 src_reg_id;
+ u8 dst_reg_id;
+ bool inc0;
+ bool inc1;
+ u8 proto_offset_opc;
+ u8 proto_offset;
+ u8 branch_addr;
+ u16 imm;
+ bool dedicate_flags_ena;
+ u8 dst_start;
+ u8 dst_len;
+ bool flags_extr_imm;
+ u8 flags_start_imm;
+};
+
+/* Parser program code (iMEM) */
+struct ice_imem_item {
+ u16 idx;
+ struct ice_bst_main b_m;
+ struct ice_bst_keybuilder b_kb;
+ u8 pg_prio;
+ struct ice_np_keybuilder np_kb;
+ struct ice_pg_keybuilder pg_kb;
+ struct ice_alu alu0;
+ struct ice_alu alu1;
+ struct ice_alu alu2;
+};
+
+/*** ICE_SID_RXPARSER_METADATA_INIT section ***/
+#define ICE_METAINIT_TABLE_SIZE 16
+
+/* Metadata Initialization item */
+struct ice_metainit_item {
+ u16 idx;
+
+ u8 tsr; /* TCAM Search key Register */
+ u16 ho; /* Header Offset register */
+ u16 pc; /* Program Counter register */
+ u16 pg_rn; /* Parse Graph Root Node */
+ u8 cd; /* Control Domain ID */
+
+ /* General Purpose Registers */
+ bool gpr_a_ctrl;
+ u8 gpr_a_data_mdid;
+ u8 gpr_a_data_start;
+ u8 gpr_a_data_len;
+ u8 gpr_a_id;
+
+ bool gpr_b_ctrl;
+ u8 gpr_b_data_mdid;
+ u8 gpr_b_data_start;
+ u8 gpr_b_data_len;
+ u8 gpr_b_id;
+
+ bool gpr_c_ctrl;
+ u8 gpr_c_data_mdid;
+ u8 gpr_c_data_start;
+ u8 gpr_c_data_len;
+ u8 gpr_c_id;
+
+ bool gpr_d_ctrl;
+ u8 gpr_d_data_mdid;
+ u8 gpr_d_data_start;
+ u8 gpr_d_data_len;
+ u8 gpr_d_id;
+
+ u64 flags; /* Initial value for all flags */
+};
+
+/*** ICE_SID_RXPARSER_CAM, ICE_SID_RXPARSER_PG_SPILL,
+ * ICE_SID_RXPARSER_NOMATCH_CAM and ICE_SID_RXPARSER_NOMATCH_SPILL
+ * sections ***/
+#define ICE_PG_CAM_TABLE_SIZE 2048
+#define ICE_PG_SP_CAM_TABLE_SIZE 128
+#define ICE_PG_NM_CAM_TABLE_SIZE 1024
+#define ICE_PG_NM_SP_CAM_TABLE_SIZE 64
+
+struct ice_pg_cam_key {
+ bool valid;
+ struct_group_attr(val, __packed,
+ u16 node_id; /* Node ID of protocol in parse graph */
+ bool flag0;
+ bool flag1;
+ bool flag2;
+ bool flag3;
+ u8 boost_idx; /* Boost TCAM match index */
+ u16 alu_reg;
+ u32 next_proto; /* next Protocol value (must be last) */
+ );
+};
+
+struct ice_pg_nm_cam_key {
+ bool valid;
+ struct_group_attr(val, __packed,
+ u16 node_id;
+ bool flag0;
+ bool flag1;
+ bool flag2;
+ bool flag3;
+ u8 boost_idx;
+ u16 alu_reg;
+ );
+};
+
+struct ice_pg_cam_action {
+ u16 next_node; /* Parser Node ID for the next round */
+ u8 next_pc; /* next Program Counter */
+ bool is_pg; /* is protocol group */
+ u8 proto_id; /* protocol ID or proto group ID */
+ bool is_mg; /* is marker group */
+ u8 marker_id; /* marker ID or marker group ID */
+ bool is_last_round;
+ bool ho_polarity; /* header offset polarity */
+ u16 ho_inc;
+};
+
+/* Parse Graph item */
+struct ice_pg_cam_item {
+ u16 idx;
+ struct ice_pg_cam_key key;
+ struct ice_pg_cam_action action;
+};
+
+/* Parse Graph No Match item */
+struct ice_pg_nm_cam_item {
+ u16 idx;
+ struct ice_pg_nm_cam_key key;
+ struct ice_pg_cam_action action;
+};
+
+struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table,
+ int size, struct ice_pg_cam_key *key);
+struct ice_pg_nm_cam_item *
+ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
+ struct ice_pg_cam_key *key);
+
+/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/
+#define ICE_BST_TCAM_TABLE_SIZE 256
+#define ICE_BST_TCAM_KEY_SIZE 20
+#define ICE_BST_KEY_TCAM_SIZE 19
+
+/* Boost TCAM item */
+struct ice_bst_tcam_item {
+ u16 addr;
+ u8 key[ICE_BST_TCAM_KEY_SIZE];
+ u8 key_inv[ICE_BST_TCAM_KEY_SIZE];
+ u8 hit_idx_grp;
+ u8 pg_prio;
+ struct ice_np_keybuilder np_kb;
+ struct ice_pg_keybuilder pg_kb;
+ struct ice_alu alu0;
+ struct ice_alu alu1;
+ struct ice_alu alu2;
+};
+
+#define ICE_LBL_LEN 64
+#define ICE_LBL_BST_DVM "BOOST_MAC_VLAN_DVM"
+#define ICE_LBL_BST_SVM "BOOST_MAC_VLAN_SVM"
+#define ICE_LBL_TNL_VXLAN "TNL_VXLAN"
+#define ICE_LBL_TNL_GENEVE "TNL_GENEVE"
+#define ICE_LBL_TNL_UDP_ECPRI "TNL_UDP_ECPRI"
+
+enum ice_lbl_type {
+ ICE_LBL_BST_TYPE_UNKNOWN,
+ ICE_LBL_BST_TYPE_DVM,
+ ICE_LBL_BST_TYPE_SVM,
+ ICE_LBL_BST_TYPE_VXLAN,
+ ICE_LBL_BST_TYPE_GENEVE,
+ ICE_LBL_BST_TYPE_UDP_ECPRI,
+};
+
+struct ice_lbl_item {
+ u16 idx;
+ char label[ICE_LBL_LEN];
+
+ /* must be at the end, not part of the DDP section */
+ enum ice_lbl_type type;
+};
+
+struct ice_bst_tcam_item *
+ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat);
+struct ice_bst_tcam_item *
+ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table,
+ struct ice_lbl_item *lbl_table,
+ enum ice_lbl_type type, u16 *start);
+
+/*** ICE_SID_RXPARSER_MARKER_PTYPE section ***/
+#define ICE_PTYPE_MK_TCAM_TABLE_SIZE 1024
+#define ICE_PTYPE_MK_TCAM_KEY_SIZE 10
+
+struct ice_ptype_mk_tcam_item {
+ u16 address;
+ u16 ptype;
+ u8 key[ICE_PTYPE_MK_TCAM_KEY_SIZE];
+ u8 key_inv[ICE_PTYPE_MK_TCAM_KEY_SIZE];
+} __packed;
+
+struct ice_ptype_mk_tcam_item *
+ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table,
+ u8 *pat, int len);
+/*** ICE_SID_RXPARSER_MARKER_GRP section ***/
+#define ICE_MK_GRP_TABLE_SIZE 128
+#define ICE_MK_COUNT_PER_GRP 8
+
+/* Marker Group item */
+struct ice_mk_grp_item {
+ int idx;
+ u8 markers[ICE_MK_COUNT_PER_GRP];
+};
+
+/*** ICE_SID_RXPARSER_PROTO_GRP section ***/
+#define ICE_PROTO_COUNT_PER_GRP 8
+#define ICE_PROTO_GRP_TABLE_SIZE 192
+#define ICE_PROTO_GRP_ITEM_SIZE 22
+struct ice_proto_off {
+ bool polarity; /* true: positive, false: negative */
+ u8 proto_id;
+ u16 offset; /* 10 bit protocol offset */
+};
+
+/* Protocol Group item */
+struct ice_proto_grp_item {
+ u16 idx;
+ struct ice_proto_off po[ICE_PROTO_COUNT_PER_GRP];
+};
+
+/*** ICE_SID_RXPARSER_FLAG_REDIR section ***/
+#define ICE_FLG_RD_TABLE_SIZE 64
+#define ICE_FLG_RDT_SIZE 64
+
+/* Flags Redirection item */
+struct ice_flg_rd_item {
+ u16 idx;
+ bool expose;
+ u8 intr_flg_id; /* Internal Flag ID */
+};
+
+u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg);
+
+/*** ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT_KEY_BUILDER_ACL,
+ * ICE_SID_XLT_KEY_BUILDER_FD and ICE_SID_XLT_KEY_BUILDER_RSS
+ * sections ***/
+#define ICE_XLT_KB_FLAG0_14_CNT 15
+#define ICE_XLT_KB_TBL_CNT 8
+#define ICE_XLT_KB_TBL_ENTRY_SIZE 24
+
+struct ice_xlt_kb_entry {
+ u8 xlt1_ad_sel;
+ u8 xlt2_ad_sel;
+ u16 flg0_14_sel[ICE_XLT_KB_FLAG0_14_CNT];
+ u8 xlt1_md_sel;
+ u8 xlt2_md_sel;
+};
+
+/* XLT Key Builder */
+struct ice_xlt_kb {
+ u8 xlt1_pm; /* XLT1 Partition Mode */
+ u8 xlt2_pm; /* XLT2 Partition Mode */
+ u8 prof_id_pm; /* Profile ID Partition Mode */
+ u64 flag15;
+
+ struct ice_xlt_kb_entry entries[ICE_XLT_KB_TBL_CNT];
+};
+
+u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag);
+
+/*** Parser API ***/
+#define ICE_GPR_HV_IDX 64
+#define ICE_GPR_HV_SIZE 32
+#define ICE_GPR_ERR_IDX 84
+#define ICE_GPR_FLG_IDX 104
+#define ICE_GPR_FLG_SIZE 16
+
+#define ICE_GPR_TSR_IDX 108 /* TSR: TCAM Search Register */
+#define ICE_GPR_NN_IDX 109 /* NN: Next Parsing Cycle Node ID */
+#define ICE_GPR_HO_IDX 110 /* HO: Next Parsing Cycle hdr Offset */
+#define ICE_GPR_NP_IDX 111 /* NP: Next Parsing Cycle */
+
+#define ICE_PARSER_MAX_PKT_LEN 504
+#define ICE_PARSER_PKT_REV 32
+#define ICE_PARSER_GPR_NUM 128
+#define ICE_PARSER_FLG_NUM 64
+#define ICE_PARSER_ERR_NUM 16
+#define ICE_BST_KEY_SIZE 10
+#define ICE_MARKER_ID_SIZE 9
+#define ICE_MARKER_MAX_SIZE \
+ (ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1)
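+/* ICE_MARKER_MAX_SIZE evaluates to 71 (9 bytes * 8 bits - 1), the highest
+ * marker bit index; it also serves as the "no marker" sentinel checked in
+ * ice_marker_update().
+ */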
+#define ICE_MARKER_ID_NUM 8
+#define ICE_PO_PAIR_SIZE 256
+
+struct ice_gpr_pu {
+ /* array of flags to indicate if GPR needs to be updated */
+ bool gpr_val_upd[ICE_PARSER_GPR_NUM];
+ u16 gpr_val[ICE_PARSER_GPR_NUM];
+ u64 flg_msk;
+ u64 flg_val;
+ u16 err_msk;
+ u16 err_val;
+};
+
+enum ice_pg_prio {
+ ICE_PG_P0 = 0,
+ ICE_PG_P1 = 1,
+ ICE_PG_P2 = 2,
+ ICE_PG_P3 = 3,
+};
+
+struct ice_parser_rt {
+ struct ice_parser *psr;
+ u16 gpr[ICE_PARSER_GPR_NUM];
+ u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV];
+ u16 pkt_len;
+ u16 po;
+ u8 bst_key[ICE_BST_KEY_SIZE];
+ struct ice_pg_cam_key pg_key;
+ struct ice_alu *alu0;
+ struct ice_alu *alu1;
+ struct ice_alu *alu2;
+ struct ice_pg_cam_action *action;
+ u8 pg_prio;
+ struct ice_gpr_pu pu;
+ u8 markers[ICE_MARKER_ID_SIZE];
+ bool protocols[ICE_PO_PAIR_SIZE];
+ u16 offsets[ICE_PO_PAIR_SIZE];
+};
+
+struct ice_parser_proto_off {
+ u8 proto_id; /* hardware protocol ID */
+ u16 offset; /* offset from the start of the protocol header */
+};
+
+#define ICE_PARSER_PROTO_OFF_PAIR_SIZE 16
+#define ICE_PARSER_FLAG_PSR_SIZE 8
+#define ICE_PARSER_FV_SIZE 48
+#define ICE_PARSER_FV_MAX 24
+#define ICE_BT_TUN_PORT_OFF_H 16
+#define ICE_BT_TUN_PORT_OFF_L 15
+#define ICE_BT_VM_OFF 0
+#define ICE_UDP_PORT_OFF_H 1
+#define ICE_UDP_PORT_OFF_L 0
+
+struct ice_parser_result {
+ u16 ptype; /* 16-bit hardware PTYPE */
+ /* array of protocol and header offset pairs */
+ struct ice_parser_proto_off po[ICE_PARSER_PROTO_OFF_PAIR_SIZE];
+ int po_num; /* # of protocol-offset pairs (must be <= 16) */
+ u64 flags_psr; /* parser flags */
+ u64 flags_pkt; /* packet flags */
+ u16 flags_sw; /* key builder flags for SW */
+ u16 flags_acl; /* key builder flags for ACL */
+ u16 flags_fd; /* key builder flags for FD */
+ u16 flags_rss; /* key builder flags for RSS */
+};
+
+void ice_parser_rt_reset(struct ice_parser_rt *rt);
+void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf,
+ int pkt_len);
+int ice_parser_rt_execute(struct ice_parser_rt *rt,
+ struct ice_parser_result *rslt);
+
+struct ice_parser {
+ struct ice_hw *hw; /* pointer to the hardware structure */
+
+ struct ice_imem_item *imem_table;
+ struct ice_metainit_item *mi_table;
+
+ struct ice_pg_cam_item *pg_cam_table;
+ struct ice_pg_cam_item *pg_sp_cam_table;
+ struct ice_pg_nm_cam_item *pg_nm_cam_table;
+ struct ice_pg_nm_cam_item *pg_nm_sp_cam_table;
+
+ struct ice_bst_tcam_item *bst_tcam_table;
+ struct ice_lbl_item *bst_lbl_table;
+ struct ice_ptype_mk_tcam_item *ptype_mk_tcam_table;
+ struct ice_mk_grp_item *mk_grp_table;
+ struct ice_proto_grp_item *proto_grp_table;
+ struct ice_flg_rd_item *flg_rd_table;
+
+ struct ice_xlt_kb *xlt_kb_sw;
+ struct ice_xlt_kb *xlt_kb_acl;
+ struct ice_xlt_kb *xlt_kb_fd;
+ struct ice_xlt_kb *xlt_kb_rss;
+
+ struct ice_parser_rt rt;
+};
+
+struct ice_parser *ice_parser_create(struct ice_hw *hw);
+void ice_parser_destroy(struct ice_parser *psr);
+void ice_parser_dvm_set(struct ice_parser *psr, bool on);
+int ice_parser_vxlan_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on);
+int ice_parser_geneve_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on);
+int ice_parser_ecpri_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on);
+int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf,
+ int pkt_len, struct ice_parser_result *rslt);
+void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt);
+
+struct ice_parser_fv {
+ u8 proto_id; /* hardware protocol ID */
+ u16 offset; /* offset from the start of the protocol header */
+ u16 spec; /* pattern to match */
+ u16 msk; /* pattern mask */
+};
+
+struct ice_parser_profile {
+ /* array of field vectors */
+ struct ice_parser_fv fv[ICE_PARSER_FV_SIZE];
+ int fv_num; /* # of field vectors (must be <= 48) */
+ u16 flags; /* key builder flags */
+ u16 flags_msk; /* key builder flag mask */
+
+ DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX); /* PTYPE bitmap */
+};
+
+int ice_parser_profile_init(struct ice_parser_result *rslt,
+ const u8 *pkt_buf, const u8 *msk_buf,
+ int buf_len, enum ice_block blk,
+ struct ice_parser_profile *prof);
+void ice_parser_profile_dump(struct ice_hw *hw,
+ struct ice_parser_profile *prof);
+#endif /* _ICE_PARSER_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_parser_rt.c b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
new file mode 100644
index 000000000000..dedf5e854e4b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
@@ -0,0 +1,861 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "ice_common.h"
+
+static void ice_rt_tsr_set(struct ice_parser_rt *rt, u16 tsr)
+{
+ rt->gpr[ICE_GPR_TSR_IDX] = tsr;
+}
+
+static void ice_rt_ho_set(struct ice_parser_rt *rt, u16 ho)
+{
+ rt->gpr[ICE_GPR_HO_IDX] = ho;
+ memcpy(&rt->gpr[ICE_GPR_HV_IDX], &rt->pkt_buf[ho], ICE_GPR_HV_SIZE);
+}
+
+static void ice_rt_np_set(struct ice_parser_rt *rt, u16 pc)
+{
+ rt->gpr[ICE_GPR_NP_IDX] = pc;
+}
+
+static void ice_rt_nn_set(struct ice_parser_rt *rt, u16 node)
+{
+ rt->gpr[ICE_GPR_NN_IDX] = node;
+}
+
+static void
+ice_rt_flag_set(struct ice_parser_rt *rt, unsigned int idx, bool set)
+{
+ struct ice_hw *hw = rt->psr->hw;
+ unsigned int word, id;
+
+ word = idx / ICE_GPR_FLG_SIZE;
+ id = idx % ICE_GPR_FLG_SIZE;
+
+ if (set) {
+ rt->gpr[ICE_GPR_FLG_IDX + word] |= (u16)BIT(id);
+ ice_debug(hw, ICE_DBG_PARSER, "Set parser flag %u\n", idx);
+ } else {
+ rt->gpr[ICE_GPR_FLG_IDX + word] &= ~(u16)BIT(id);
+ ice_debug(hw, ICE_DBG_PARSER, "Clear parser flag %u\n", idx);
+ }
+}
+
+static void ice_rt_gpr_set(struct ice_parser_rt *rt, int idx, u16 val)
+{
+ struct ice_hw *hw = rt->psr->hw;
+
+ if (idx == ICE_GPR_HO_IDX)
+ ice_rt_ho_set(rt, val);
+ else
+ rt->gpr[idx] = val;
+
+ ice_debug(hw, ICE_DBG_PARSER, "Set GPR %d value %d\n", idx, val);
+}
+
+static void ice_rt_err_set(struct ice_parser_rt *rt, unsigned int idx, bool set)
+{
+ struct ice_hw *hw = rt->psr->hw;
+
+ if (set) {
+ rt->gpr[ICE_GPR_ERR_IDX] |= (u16)BIT(idx);
+ ice_debug(hw, ICE_DBG_PARSER, "Set parser error %u\n", idx);
+ } else {
+ rt->gpr[ICE_GPR_ERR_IDX] &= ~(u16)BIT(idx);
+ ice_debug(hw, ICE_DBG_PARSER, "Reset parser error %u\n", idx);
+ }
+}
+
+/**
+ * ice_parser_rt_reset - reset the parser runtime
+ * @rt: pointer to the parser runtime
+ */
+void ice_parser_rt_reset(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+ struct ice_metainit_item *mi;
+ unsigned int i;
+
+ mi = &psr->mi_table[0];
+
+ memset(rt, 0, sizeof(*rt));
+ rt->psr = psr;
+
+ ice_rt_tsr_set(rt, mi->tsr);
+ ice_rt_ho_set(rt, mi->ho);
+ ice_rt_np_set(rt, mi->pc);
+ ice_rt_nn_set(rt, mi->pg_rn);
+
+ for (i = 0; i < ICE_PARSER_FLG_NUM; i++) {
+ if (mi->flags & BIT(i))
+ ice_rt_flag_set(rt, i, true);
+ }
+}
+
+/**
+ * ice_parser_rt_pktbuf_set - set a packet into parser runtime
+ * @rt: pointer to the parser runtime
+ * @pkt_buf: buffer with packet data
+ * @pkt_len: packet buffer length
+ */
+void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf,
+ int pkt_len)
+{
+ int len = min(ICE_PARSER_MAX_PKT_LEN, pkt_len);
+ u16 ho = rt->gpr[ICE_GPR_HO_IDX];
+
+ memcpy(rt->pkt_buf, pkt_buf, len);
+ rt->pkt_len = pkt_len;
+
+ memcpy(&rt->gpr[ICE_GPR_HV_IDX], &rt->pkt_buf[ho], ICE_GPR_HV_SIZE);
+}
+
+static void ice_bst_key_init(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ u8 tsr = (u8)rt->gpr[ICE_GPR_TSR_IDX];
+ u16 ho = rt->gpr[ICE_GPR_HO_IDX];
+ u8 *key = rt->bst_key;
+ int idd, i;
+
+ idd = ICE_BST_TCAM_KEY_SIZE - 1;
+ if (imem->b_kb.tsr_ctrl)
+ key[idd] = tsr;
+ else
+ key[idd] = imem->b_kb.prio;
+
+ idd = ICE_BST_KEY_TCAM_SIZE - 1;
+ for (i = idd; i >= 0; i--) {
+ int j;
+
+ j = ho + idd - i;
+ if (j < ICE_PARSER_MAX_PKT_LEN)
+ key[i] = rt->pkt_buf[j];
+ else
+ key[i] = 0;
+ }
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n");
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ key[0], key[1], key[2], key[3], key[4],
+ key[5], key[6], key[7], key[8], key[9]);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n");
+}
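+
+/* Key layout sketch: key[0..ICE_BST_KEY_TCAM_SIZE - 1] holds the packet
+ * bytes at pkt_buf[ho..ho + 18] in reverse order (zero-padded past the
+ * packet buffer), and the final byte holds either the TSR value or the
+ * iMEM boost key priority, depending on tsr_ctrl.
+ */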
+
+static u16 ice_bit_rev_u16(u16 v, int len)
+{
+ return bitrev16(v) >> (BITS_PER_TYPE(v) - len);
+}
+
+static u32 ice_bit_rev_u32(u32 v, int len)
+{
+ return bitrev32(v) >> (BITS_PER_TYPE(v) - len);
+}
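+
+/* Both helpers reverse the low @len bits of @v, e.g.
+ * ice_bit_rev_u16(0x0001, 4) == 0x0008 and
+ * ice_bit_rev_u32(0x00000003, 8) == 0x000000c0.
+ */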
+
+static u32 ice_hv_bit_sel(struct ice_parser_rt *rt, int start, int len)
+{
+ int offset;
+ u32 buf[2];
+ u64 val;
+
+ offset = ICE_GPR_HV_IDX + (start / BITS_PER_TYPE(u16));
+
+ memcpy(buf, &rt->gpr[offset], sizeof(buf));
+
+ buf[0] = bitrev8x4(buf[0]);
+ buf[1] = bitrev8x4(buf[1]);
+
+ val = *(u64 *)buf;
+ val >>= start % BITS_PER_TYPE(u16);
+
+ return ice_bit_rev_u32(val, len);
+}
+
+static u32 ice_pk_build(struct ice_parser_rt *rt,
+ struct ice_np_keybuilder *kb)
+{
+ if (kb->opc == ICE_NPKB_OPC_EXTRACT)
+ return ice_hv_bit_sel(rt, kb->start_reg0, kb->len_reg1);
+ else if (kb->opc == ICE_NPKB_OPC_BUILD)
+ return rt->gpr[kb->start_reg0] |
+ ((u32)rt->gpr[kb->len_reg1] << BITS_PER_TYPE(u16));
+ else if (kb->opc == ICE_NPKB_OPC_BYPASS)
+ return 0;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported OP Code %u\n",
+ kb->opc);
+ return U32_MAX;
+}
+
+static bool ice_flag_get(struct ice_parser_rt *rt, unsigned int index)
+{
+ int word = index / ICE_GPR_FLG_SIZE;
+ int id = index % ICE_GPR_FLG_SIZE;
+
+ return !!(rt->gpr[ICE_GPR_FLG_IDX + word] & (u16)BIT(id));
+}
+
+static int ice_imem_pgk_init(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ memset(&rt->pg_key, 0, sizeof(rt->pg_key));
+ rt->pg_key.next_proto = ice_pk_build(rt, &imem->np_kb);
+ if (rt->pg_key.next_proto == U32_MAX)
+ return -EINVAL;
+
+ if (imem->pg_kb.flag0_ena)
+ rt->pg_key.flag0 = ice_flag_get(rt, imem->pg_kb.flag0_idx);
+ if (imem->pg_kb.flag1_ena)
+ rt->pg_key.flag1 = ice_flag_get(rt, imem->pg_kb.flag1_idx);
+ if (imem->pg_kb.flag2_ena)
+ rt->pg_key.flag2 = ice_flag_get(rt, imem->pg_kb.flag2_idx);
+ if (imem->pg_kb.flag3_ena)
+ rt->pg_key.flag3 = ice_flag_get(rt, imem->pg_kb.flag3_idx);
+
+ rt->pg_key.alu_reg = rt->gpr[imem->pg_kb.alu_reg_idx];
+ rt->pg_key.node_id = rt->gpr[ICE_GPR_NN_IDX];
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d), flag0-3(%d,%d,%d,%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n",
+ rt->pg_key.node_id,
+ rt->pg_key.flag0,
+ rt->pg_key.flag1,
+ rt->pg_key.flag2,
+ rt->pg_key.flag3,
+ rt->pg_key.boost_idx,
+ rt->pg_key.alu_reg,
+ rt->pg_key.next_proto);
+
+ return 0;
+}
+
+static void ice_imem_alu0_set(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ rt->alu0 = &imem->alu0;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from imem pc %d\n",
+ imem->idx);
+}
+
+static void ice_imem_alu1_set(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ rt->alu1 = &imem->alu1;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from imem pc %d\n",
+ imem->idx);
+}
+
+static void ice_imem_alu2_set(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ rt->alu2 = &imem->alu2;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from imem pc %d\n",
+ imem->idx);
+}
+
+static void ice_imem_pgp_set(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ rt->pg_prio = imem->pg_prio;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from imem pc %d\n",
+ rt->pg_prio, imem->idx);
+}
+
+static int ice_bst_pgk_init(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ memset(&rt->pg_key, 0, sizeof(rt->pg_key));
+ rt->pg_key.boost_idx = bst->hit_idx_grp;
+ rt->pg_key.next_proto = ice_pk_build(rt, &bst->np_kb);
+ if (rt->pg_key.next_proto == U32_MAX)
+ return -EINVAL;
+
+ if (bst->pg_kb.flag0_ena)
+ rt->pg_key.flag0 = ice_flag_get(rt, bst->pg_kb.flag0_idx);
+ if (bst->pg_kb.flag1_ena)
+ rt->pg_key.flag1 = ice_flag_get(rt, bst->pg_kb.flag1_idx);
+ if (bst->pg_kb.flag2_ena)
+ rt->pg_key.flag2 = ice_flag_get(rt, bst->pg_kb.flag2_idx);
+ if (bst->pg_kb.flag3_ena)
+ rt->pg_key.flag3 = ice_flag_get(rt, bst->pg_kb.flag3_idx);
+
+ rt->pg_key.alu_reg = rt->gpr[bst->pg_kb.alu_reg_idx];
+ rt->pg_key.node_id = rt->gpr[ICE_GPR_NN_IDX];
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d), flag0-3(%d,%d,%d,%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n",
+ rt->pg_key.node_id,
+ rt->pg_key.flag0,
+ rt->pg_key.flag1,
+ rt->pg_key.flag2,
+ rt->pg_key.flag3,
+ rt->pg_key.boost_idx,
+ rt->pg_key.alu_reg,
+ rt->pg_key.next_proto);
+
+ return 0;
+}
+
+static void ice_bst_alu0_set(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ rt->alu0 = &bst->alu0;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from boost address %d\n",
+ bst->addr);
+}
+
+static void ice_bst_alu1_set(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ rt->alu1 = &bst->alu1;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from boost address %d\n",
+ bst->addr);
+}
+
+static void ice_bst_alu2_set(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ rt->alu2 = &bst->alu2;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from boost address %d\n",
+ bst->addr);
+}
+
+static void ice_bst_pgp_set(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ rt->pg_prio = bst->pg_prio;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from boost address %d\n",
+ rt->pg_prio, bst->addr);
+}
+
+static struct ice_pg_cam_item *ice_rt_pg_cam_match(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+ struct ice_pg_cam_item *item;
+
+ item = ice_pg_cam_match(psr->pg_cam_table, ICE_PG_CAM_TABLE_SIZE,
+ &rt->pg_key);
+ if (!item)
+ item = ice_pg_cam_match(psr->pg_sp_cam_table,
+ ICE_PG_SP_CAM_TABLE_SIZE, &rt->pg_key);
+ return item;
+}
+
+static
+struct ice_pg_nm_cam_item *ice_rt_pg_nm_cam_match(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+ struct ice_pg_nm_cam_item *item;
+
+ item = ice_pg_nm_cam_match(psr->pg_nm_cam_table,
+ ICE_PG_NM_CAM_TABLE_SIZE, &rt->pg_key);
+
+ if (!item)
+ item = ice_pg_nm_cam_match(psr->pg_nm_sp_cam_table,
+ ICE_PG_NM_SP_CAM_TABLE_SIZE,
+ &rt->pg_key);
+ return item;
+}
+
+static void ice_gpr_add(struct ice_parser_rt *rt, int idx, u16 val)
+{
+ rt->pu.gpr_val_upd[idx] = true;
+ rt->pu.gpr_val[idx] = val;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for register %d value %d\n",
+ idx, val);
+}
+
+static void ice_pg_exe(struct ice_parser_rt *rt)
+{
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action ...\n");
+
+ ice_gpr_add(rt, ICE_GPR_NP_IDX, rt->action->next_pc);
+ ice_gpr_add(rt, ICE_GPR_NN_IDX, rt->action->next_node);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action done.\n");
+}
+
+static void ice_flg_add(struct ice_parser_rt *rt, int idx, bool val)
+{
+ rt->pu.flg_msk |= BIT_ULL(idx);
+ if (val)
+ rt->pu.flg_val |= BIT_ULL(idx);
+ else
+ rt->pu.flg_val &= ~BIT_ULL(idx);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for flag %d value %d\n",
+ idx, val);
+}
+
+static void ice_flg_update(struct ice_parser_rt *rt, struct ice_alu *alu)
+{
+ u32 hv_bit_sel;
+ int i;
+
+ if (!alu->dedicate_flags_ena)
+ return;
+
+ if (alu->flags_extr_imm) {
+ for (i = 0; i < alu->dst_len; i++)
+ ice_flg_add(rt, alu->dst_start + i,
+ !!(alu->flags_start_imm & BIT(i)));
+ } else {
+ for (i = 0; i < alu->dst_len; i++) {
+ hv_bit_sel = ice_hv_bit_sel(rt,
+ alu->flags_start_imm + i,
+ 1);
+ ice_flg_add(rt, alu->dst_start + i, !!hv_bit_sel);
+ }
+ }
+}
+
+static void ice_po_update(struct ice_parser_rt *rt, struct ice_alu *alu)
+{
+ if (alu->proto_offset_opc == ICE_PO_OFF_HDR_ADD)
+ rt->po = (u16)(rt->gpr[ICE_GPR_HO_IDX] + alu->proto_offset);
+ else if (alu->proto_offset_opc == ICE_PO_OFF_HDR_SUB)
+ rt->po = (u16)(rt->gpr[ICE_GPR_HO_IDX] - alu->proto_offset);
+ else if (alu->proto_offset_opc == ICE_PO_OFF_REMAIN)
+ rt->po = rt->gpr[ICE_GPR_HO_IDX];
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Update Protocol Offset = %d\n",
+ rt->po);
+}
+
+static u16 ice_reg_bit_sel(struct ice_parser_rt *rt, int reg_idx,
+ int start, int len)
+{
+ int offset;
+ u32 val;
+
+ offset = ICE_GPR_HV_IDX + (start / BITS_PER_TYPE(u16));
+
+ memcpy(&val, &rt->gpr[offset], sizeof(val));
+
+ val = bitrev8x4(val);
+ val >>= start % BITS_PER_TYPE(u16);
+
+ return ice_bit_rev_u16(val, len);
+}
+
+static void ice_err_add(struct ice_parser_rt *rt, int idx, bool val)
+{
+ rt->pu.err_msk |= (u16)BIT(idx);
+ if (val)
+ rt->pu.err_val |= (u16)BIT(idx);
+ else
+ rt->pu.err_val &= ~(u16)BIT(idx);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for error %d value %d\n",
+ idx, val);
+}
+
+static void ice_dst_reg_bit_set(struct ice_parser_rt *rt, struct ice_alu *alu,
+ bool val)
+{
+ u16 flg_idx;
+
+ if (alu->dedicate_flags_ena) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "DedicatedFlagsEnable should not be enabled in opcode %d\n",
+ alu->opc);
+ return;
+ }
+
+ if (alu->dst_reg_id == ICE_GPR_ERR_IDX) {
+ if (alu->dst_start >= ICE_PARSER_ERR_NUM) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid error %d\n",
+ alu->dst_start);
+ return;
+ }
+ ice_err_add(rt, alu->dst_start, val);
+ } else if (alu->dst_reg_id >= ICE_GPR_FLG_IDX) {
+ flg_idx = (u16)(((alu->dst_reg_id - ICE_GPR_FLG_IDX) << 4) +
+ alu->dst_start);
+
+ if (flg_idx >= ICE_PARSER_FLG_NUM) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid flag %d\n",
+ flg_idx);
+ return;
+ }
+ ice_flg_add(rt, flg_idx, val);
+ } else {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unexpected Dest Register Bit set, RegisterID %d Start %d\n",
+ alu->dst_reg_id, alu->dst_start);
+ }
+}
+
+static void ice_alu_exe(struct ice_parser_rt *rt, struct ice_alu *alu)
+{
+ u16 dst, src, shift, imm;
+
+ if (alu->shift_xlate_sel) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "shift_xlate_sel != 0 is not expected\n");
+ return;
+ }
+
+ ice_po_update(rt, alu);
+ ice_flg_update(rt, alu);
+
+ dst = rt->gpr[alu->dst_reg_id];
+ src = ice_reg_bit_sel(rt, alu->src_reg_id,
+ alu->src_start, alu->src_len);
+ shift = alu->shift_xlate_key;
+ imm = alu->imm;
+
+ switch (alu->opc) {
+ case ICE_ALU_PARK:
+ break;
+ case ICE_ALU_MOV_ADD:
+ dst = (src << shift) + imm;
+ ice_gpr_add(rt, alu->dst_reg_id, dst);
+ break;
+ case ICE_ALU_ADD:
+ dst += (src << shift) + imm;
+ ice_gpr_add(rt, alu->dst_reg_id, dst);
+ break;
+ case ICE_ALU_ORLT:
+ if (src < imm)
+ ice_dst_reg_bit_set(rt, alu, true);
+ ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
+ break;
+ case ICE_ALU_OREQ:
+ if (src == imm)
+ ice_dst_reg_bit_set(rt, alu, true);
+ ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
+ break;
+ case ICE_ALU_SETEQ:
+ ice_dst_reg_bit_set(rt, alu, src == imm);
+ ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
+ break;
+ case ICE_ALU_MOV_XOR:
+ dst = (src << shift) ^ imm;
+ ice_gpr_add(rt, alu->dst_reg_id, dst);
+ break;
+ default:
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported ALU instruction %d\n",
+ alu->opc);
+ break;
+ }
+}
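+
+/* Worked example (illustrative operands): for ICE_ALU_MOV_ADD with
+ * src = 0x2, shift = 4 and imm = 0x1, the pending GPR update is
+ * (0x2 << 4) + 0x1 = 0x21; the conditional opcodes (ORLT, OREQ, SETEQ)
+ * instead set a destination register bit and queue a branch to
+ * alu->branch_addr.
+ */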
+
+static void ice_alu0_exe(struct ice_parser_rt *rt)
+{
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 ...\n");
+ ice_alu_exe(rt, rt->alu0);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 done.\n");
+}
+
+static void ice_alu1_exe(struct ice_parser_rt *rt)
+{
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 ...\n");
+ ice_alu_exe(rt, rt->alu1);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 done.\n");
+}
+
+static void ice_alu2_exe(struct ice_parser_rt *rt)
+{
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 ...\n");
+ ice_alu_exe(rt, rt->alu2);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 done.\n");
+}
+
+static void ice_pu_exe(struct ice_parser_rt *rt)
+{
+ struct ice_gpr_pu *pu = &rt->pu;
+ unsigned int i;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers ...\n");
+
+ for (i = 0; i < ICE_PARSER_GPR_NUM; i++) {
+ if (pu->gpr_val_upd[i])
+ ice_rt_gpr_set(rt, i, pu->gpr_val[i]);
+ }
+
+ for (i = 0; i < ICE_PARSER_FLG_NUM; i++) {
+ if (pu->flg_msk & BIT(i))
+ ice_rt_flag_set(rt, i, pu->flg_val & BIT(i));
+ }
+
+ for (i = 0; i < ICE_PARSER_ERR_NUM; i++) {
+ if (pu->err_msk & BIT(i))
+ ice_rt_err_set(rt, i, pu->err_val & BIT(i));
+ }
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers done.\n");
+}
+
+static void ice_alu_pg_exe(struct ice_parser_rt *rt)
+{
+ memset(&rt->pu, 0, sizeof(rt->pu));
+
+ switch (rt->pg_prio) {
+ case (ICE_PG_P0):
+ ice_pg_exe(rt);
+ ice_alu0_exe(rt);
+ ice_alu1_exe(rt);
+ ice_alu2_exe(rt);
+ break;
+ case (ICE_PG_P1):
+ ice_alu0_exe(rt);
+ ice_pg_exe(rt);
+ ice_alu1_exe(rt);
+ ice_alu2_exe(rt);
+ break;
+ case (ICE_PG_P2):
+ ice_alu0_exe(rt);
+ ice_alu1_exe(rt);
+ ice_pg_exe(rt);
+ ice_alu2_exe(rt);
+ break;
+ case (ICE_PG_P3):
+ ice_alu0_exe(rt);
+ ice_alu1_exe(rt);
+ ice_alu2_exe(rt);
+ ice_pg_exe(rt);
+ break;
+ }
+
+ ice_pu_exe(rt);
+
+ if (rt->action->ho_inc == 0)
+ return;
+
+ if (rt->action->ho_polarity)
+ ice_rt_ho_set(rt, rt->gpr[ICE_GPR_HO_IDX] + rt->action->ho_inc);
+ else
+ ice_rt_ho_set(rt, rt->gpr[ICE_GPR_HO_IDX] - rt->action->ho_inc);
+}
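+
+/* pg_prio selects where the ParseGraph action runs within the ALU
+ * sequence: priority N executes it after N ALUs, so ICE_PG_P2 yields
+ * ALU0, ALU1, PG, ALU2.
+ */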
+
+static void ice_proto_off_update(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+
+ if (rt->action->is_pg) {
+ struct ice_proto_grp_item *proto_grp =
+ &psr->proto_grp_table[rt->action->proto_id];
+ u16 po;
+ int i;
+
+ for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) {
+ struct ice_proto_off *entry = &proto_grp->po[i];
+
+ if (entry->proto_id == U8_MAX)
+ break;
+
+ if (!entry->polarity)
+ po = rt->po + entry->offset;
+ else
+ po = rt->po - entry->offset;
+
+ rt->protocols[entry->proto_id] = true;
+ rt->offsets[entry->proto_id] = po;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n",
+ entry->proto_id, po);
+ }
+ } else {
+ rt->protocols[rt->action->proto_id] = true;
+ rt->offsets[rt->action->proto_id] = rt->po;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n",
+ rt->action->proto_id, rt->po);
+ }
+}
+
+static void ice_marker_set(struct ice_parser_rt *rt, int idx)
+{
+ unsigned int byte = idx / BITS_PER_BYTE;
+ unsigned int bit = idx % BITS_PER_BYTE;
+
+ rt->markers[byte] |= (u8)BIT(bit);
+}
+
+static void ice_marker_update(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+
+ if (rt->action->is_mg) {
+ struct ice_mk_grp_item *mk_grp =
+ &psr->mk_grp_table[rt->action->marker_id];
+ int i;
+
+ for (i = 0; i < ICE_MARKER_ID_NUM; i++) {
+ u8 marker = mk_grp->markers[i];
+
+ if (marker == ICE_MARKER_MAX_SIZE)
+ break;
+
+ ice_marker_set(rt, marker);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n",
+ marker);
+ }
+ } else {
+ if (rt->action->marker_id != ICE_MARKER_MAX_SIZE)
+ ice_marker_set(rt, rt->action->marker_id);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n",
+ rt->action->marker_id);
+ }
+}
+
+static u16 ice_ptype_resolve(struct ice_parser_rt *rt)
+{
+ struct ice_ptype_mk_tcam_item *item;
+ struct ice_parser *psr = rt->psr;
+
+ item = ice_ptype_mk_tcam_match(psr->ptype_mk_tcam_table,
+ rt->markers, ICE_MARKER_ID_SIZE);
+ if (item)
+ return item->ptype;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Could not resolve PTYPE\n");
+ return U16_MAX;
+}
+
+static void ice_proto_off_resolve(struct ice_parser_rt *rt,
+ struct ice_parser_result *rslt)
+{
+ int i;
+
+ for (i = 0; i < ICE_PO_PAIR_SIZE - 1; i++) {
+ if (rt->protocols[i]) {
+ rslt->po[rslt->po_num].proto_id = (u8)i;
+ rslt->po[rslt->po_num].offset = rt->offsets[i];
+ rslt->po_num++;
+ }
+ }
+}
+
+static void ice_result_resolve(struct ice_parser_rt *rt,
+ struct ice_parser_result *rslt)
+{
+ struct ice_parser *psr = rt->psr;
+
+ memset(rslt, 0, sizeof(*rslt));
+
+ memcpy(&rslt->flags_psr, &rt->gpr[ICE_GPR_FLG_IDX],
+ ICE_PARSER_FLAG_PSR_SIZE);
+ rslt->flags_pkt = ice_flg_redirect(psr->flg_rd_table, rslt->flags_psr);
+ rslt->flags_sw = ice_xlt_kb_flag_get(psr->xlt_kb_sw, rslt->flags_pkt);
+ rslt->flags_fd = ice_xlt_kb_flag_get(psr->xlt_kb_fd, rslt->flags_pkt);
+ rslt->flags_rss = ice_xlt_kb_flag_get(psr->xlt_kb_rss, rslt->flags_pkt);
+
+ ice_proto_off_resolve(rt, rslt);
+ rslt->ptype = ice_ptype_resolve(rt);
+}
+
+/**
+ * ice_parser_rt_execute - parser execution routine
+ * @rt: pointer to the parser runtime
+ * @rslt: input/output parameter to save parser result
+ *
+ * Return: 0 on success or errno.
+ */
+int ice_parser_rt_execute(struct ice_parser_rt *rt,
+ struct ice_parser_result *rslt)
+{
+ struct ice_pg_nm_cam_item *pg_nm_cam;
+ struct ice_parser *psr = rt->psr;
+ struct ice_pg_cam_item *pg_cam;
+ int status = 0;
+ u16 node;
+ u16 pc;
+
+ node = rt->gpr[ICE_GPR_NN_IDX];
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Start with Node: %u\n", node);
+
+ while (true) {
+ struct ice_bst_tcam_item *bst;
+ struct ice_imem_item *imem;
+
+ pc = rt->gpr[ICE_GPR_NP_IDX];
+ imem = &psr->imem_table[pc];
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load imem at pc: %u\n",
+ pc);
+
+ ice_bst_key_init(rt, imem);
+ bst = ice_bst_tcam_match(psr->bst_tcam_table, rt->bst_key);
+ if (!bst) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "No Boost TCAM Match\n");
+ status = ice_imem_pgk_init(rt, imem);
+ if (status)
+ break;
+ ice_imem_alu0_set(rt, imem);
+ ice_imem_alu1_set(rt, imem);
+ ice_imem_alu2_set(rt, imem);
+ ice_imem_pgp_set(rt, imem);
+ } else {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Boost TCAM Match address: %u\n",
+ bst->addr);
+ if (imem->b_m.pg) {
+ status = ice_bst_pgk_init(rt, bst);
+ if (status)
+ break;
+ ice_bst_pgp_set(rt, bst);
+ } else {
+ status = ice_imem_pgk_init(rt, imem);
+ if (status)
+ break;
+ ice_imem_pgp_set(rt, imem);
+ }
+
+ if (imem->b_m.alu0)
+ ice_bst_alu0_set(rt, bst);
+ else
+ ice_imem_alu0_set(rt, imem);
+
+ if (imem->b_m.alu1)
+ ice_bst_alu1_set(rt, bst);
+ else
+ ice_imem_alu1_set(rt, imem);
+
+ if (imem->b_m.alu2)
+ ice_bst_alu2_set(rt, bst);
+ else
+ ice_imem_alu2_set(rt, imem);
+ }
+
+ rt->action = NULL;
+ pg_cam = ice_rt_pg_cam_match(rt);
+ if (!pg_cam) {
+ pg_nm_cam = ice_rt_pg_nm_cam_match(rt);
+ if (pg_nm_cam) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph Nomatch CAM Address %u\n",
+ pg_nm_cam->idx);
+ rt->action = &pg_nm_cam->action;
+ }
+ } else {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph CAM Address %u\n",
+ pg_cam->idx);
+ rt->action = &pg_cam->action;
+ }
+
+ if (!rt->action) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Failed to match ParseGraph CAM, stop parsing.\n");
+ status = -EINVAL;
+ break;
+ }
+
+ ice_alu_pg_exe(rt);
+ ice_marker_update(rt);
+ ice_proto_off_update(rt);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Go to node %u\n",
+ rt->action->next_node);
+
+ if (rt->action->is_last_round) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Last Round in ParseGraph Action, stop parsing.\n");
+ break;
+ }
+
+ if (rt->gpr[ICE_GPR_HO_IDX] >= rt->pkt_len) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Header Offset (%u) reached packet len (%u), stop parsing\n",
+ rt->gpr[ICE_GPR_HO_IDX], rt->pkt_len);
+ break;
+ }
+ }
+
+ ice_result_resolve(rt, rslt);
+
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index bdda3401e343..970a99a52bf1 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -59,12 +59,13 @@ static void
ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_repr *repr = np->repr;
struct ice_eth_stats *eth_stats;
struct ice_vsi *vsi;
- if (ice_is_vf_disabled(np->repr->vf))
+ if (repr->ops.ready(repr))
return;
- vsi = np->repr->src_vsi;
+ vsi = repr->src_vsi;
ice_update_vsi_stats(vsi);
eth_stats = &vsi->eth_stats;
@@ -93,7 +94,7 @@ struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
}
/**
- * ice_repr_open - Enable port representor's network interface
+ * ice_repr_vf_open - Enable port representor's network interface
* @netdev: network interface device structure
*
* The open entry point is called when a port representor's network
@@ -102,7 +103,7 @@ struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
*
* Returns 0 on success
*/
-static int ice_repr_open(struct net_device *netdev)
+static int ice_repr_vf_open(struct net_device *netdev)
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
struct ice_vf *vf;
@@ -118,8 +119,16 @@ static int ice_repr_open(struct net_device *netdev)
return 0;
}
+static int ice_repr_sf_open(struct net_device *netdev)
+{
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ return 0;
+}
+
/**
- * ice_repr_stop - Disable port representor's network interface
+ * ice_repr_vf_stop - Disable port representor's network interface
* @netdev: network interface device structure
*
* The stop entry point is called when a port representor's network
@@ -128,7 +137,7 @@ static int ice_repr_open(struct net_device *netdev)
*
* Returns 0 on success
*/
-static int ice_repr_stop(struct net_device *netdev)
+static int ice_repr_vf_stop(struct net_device *netdev)
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
struct ice_vf *vf;
@@ -144,6 +153,14 @@ static int ice_repr_stop(struct net_device *netdev)
return 0;
}
+static int ice_repr_sf_stop(struct net_device *netdev)
+{
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+}
+
/**
* ice_repr_sp_stats64 - get slow path stats for port representor
* @dev: network interface device structure
@@ -245,10 +262,20 @@ ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
}
}
-static const struct net_device_ops ice_repr_netdev_ops = {
+static const struct net_device_ops ice_repr_vf_netdev_ops = {
.ndo_get_stats64 = ice_repr_get_stats64,
- .ndo_open = ice_repr_open,
- .ndo_stop = ice_repr_stop,
+ .ndo_open = ice_repr_vf_open,
+ .ndo_stop = ice_repr_vf_stop,
+ .ndo_start_xmit = ice_eswitch_port_start_xmit,
+ .ndo_setup_tc = ice_repr_setup_tc,
+ .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
+ .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
+};
+
+static const struct net_device_ops ice_repr_sf_netdev_ops = {
+ .ndo_get_stats64 = ice_repr_get_stats64,
+ .ndo_open = ice_repr_sf_open,
+ .ndo_stop = ice_repr_sf_stop,
.ndo_start_xmit = ice_eswitch_port_start_xmit,
.ndo_setup_tc = ice_repr_setup_tc,
.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
@@ -261,18 +288,20 @@ static const struct net_device_ops ice_repr_netdev_ops = {
*/
bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
- return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
+ return netdev && (netdev->netdev_ops == &ice_repr_vf_netdev_ops ||
+ netdev->netdev_ops == &ice_repr_sf_netdev_ops);
}
/**
* ice_repr_reg_netdev - register port representor netdev
* @netdev: pointer to port representor netdev
+ * @ops: new ops for netdev
*/
static int
-ice_repr_reg_netdev(struct net_device *netdev)
+ice_repr_reg_netdev(struct net_device *netdev, const struct net_device_ops *ops)
{
eth_hw_addr_random(netdev);
- netdev->netdev_ops = &ice_repr_netdev_ops;
+ netdev->netdev_ops = ops;
ice_set_ethtool_repr_ops(netdev);
netdev->hw_features |= NETIF_F_HW_TC;
@@ -283,57 +312,56 @@ ice_repr_reg_netdev(struct net_device *netdev)
return register_netdev(netdev);
}
-static void ice_repr_remove_node(struct devlink_port *devlink_port)
+static int ice_repr_ready_vf(struct ice_repr *repr)
+{
+ return !ice_check_vf_ready_for_cfg(repr->vf);
+}
+
+static int ice_repr_ready_sf(struct ice_repr *repr)
{
- devl_rate_leaf_destroy(devlink_port);
+ return !repr->sf->active;
}
/**
- * ice_repr_rem - remove representor from VF
+ * ice_repr_destroy - free a port representor's resources
* @repr: pointer to representor structure
*/
-static void ice_repr_rem(struct ice_repr *repr)
+void ice_repr_destroy(struct ice_repr *repr)
{
free_percpu(repr->stats);
free_netdev(repr->netdev);
kfree(repr);
}
-/**
- * ice_repr_rem_vf - remove representor from VF
- * @repr: pointer to representor structure
- */
-void ice_repr_rem_vf(struct ice_repr *repr)
+static void ice_repr_rem_vf(struct ice_repr *repr)
{
- ice_repr_remove_node(&repr->vf->devlink_port);
ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
unregister_netdev(repr->netdev);
ice_devlink_destroy_vf_port(repr->vf);
ice_virtchnl_set_dflt_ops(repr->vf);
- ice_repr_rem(repr);
}
-static void ice_repr_set_tx_topology(struct ice_pf *pf)
+static void ice_repr_rem_sf(struct ice_repr *repr)
{
- struct devlink *devlink;
+ unregister_netdev(repr->netdev);
+ ice_devlink_destroy_sf_port(repr->sf);
+}
+static void ice_repr_set_tx_topology(struct ice_pf *pf, struct devlink *devlink)
+{
/* only export if ADQ and DCB are disabled and eswitch is enabled */
if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
!ice_is_switchdev_running(pf))
return;
- devlink = priv_to_devlink(pf);
ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
}
/**
- * ice_repr_add - add representor for generic VSI
- * @pf: pointer to PF structure
+ * ice_repr_create - add representor for generic VSI
* @src_vsi: pointer to VSI structure of device to represent
- * @parent_mac: device MAC address
*/
-static struct ice_repr *
-ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
+static struct ice_repr *ice_repr_create(struct ice_vsi *src_vsi)
{
struct ice_netdev_priv *np;
struct ice_repr *repr;
@@ -360,7 +388,10 @@ ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
np = netdev_priv(repr->netdev);
np->repr = repr;
- ether_addr_copy(repr->parent_mac, parent_mac);
+ repr->netdev->min_mtu = ETH_MIN_MTU;
+ repr->netdev->max_mtu = ICE_MAX_MTU;
+
+ SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(src_vsi->back));
return repr;
@@ -371,34 +402,18 @@ err_alloc:
return ERR_PTR(err);
}
-struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
+static int ice_repr_add_vf(struct ice_repr *repr)
{
- struct ice_repr *repr;
- struct ice_vsi *vsi;
+ struct ice_vf *vf = repr->vf;
+ struct devlink *devlink;
int err;
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return ERR_PTR(-ENOENT);
-
err = ice_devlink_create_vf_port(vf);
if (err)
- return ERR_PTR(err);
-
- repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr);
- if (IS_ERR(repr)) {
- err = PTR_ERR(repr);
- goto err_repr_add;
- }
-
- repr->vf = vf;
-
- repr->netdev->min_mtu = ETH_MIN_MTU;
- repr->netdev->max_mtu = ICE_MAX_MTU;
+ return err;
- SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
- err = ice_repr_reg_netdev(repr->netdev);
+ err = ice_repr_reg_netdev(repr->netdev, &ice_repr_vf_netdev_ops);
if (err)
goto err_netdev;
@@ -407,17 +422,97 @@ struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
goto err_cfg_vsi;
ice_virtchnl_set_repr_ops(vf);
- ice_repr_set_tx_topology(vf->pf);
- return repr;
+ devlink = priv_to_devlink(vf->pf);
+ ice_repr_set_tx_topology(vf->pf, devlink);
+
+ return 0;
err_cfg_vsi:
unregister_netdev(repr->netdev);
err_netdev:
- ice_repr_rem(repr);
-err_repr_add:
ice_devlink_destroy_vf_port(vf);
- return ERR_PTR(err);
+ return err;
+}
+
+/**
+ * ice_repr_create_vf - add representor for VF VSI
+ * @vf: VF to create port representor on
+ *
+ * Set the correct representor type for the VF and its function pointers.
+ *
+ * Return: created port representor on success, error otherwise
+ */
+struct ice_repr *ice_repr_create_vf(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_repr *repr;
+
+ if (!vsi)
+ return ERR_PTR(-EINVAL);
+
+ repr = ice_repr_create(vsi);
+ if (IS_ERR(repr))
+ return repr;
+
+ repr->type = ICE_REPR_TYPE_VF;
+ repr->vf = vf;
+ repr->ops.add = ice_repr_add_vf;
+ repr->ops.rem = ice_repr_rem_vf;
+ repr->ops.ready = ice_repr_ready_vf;
+
+ ether_addr_copy(repr->parent_mac, vf->hw_lan_addr);
+
+ return repr;
+}
+
+static int ice_repr_add_sf(struct ice_repr *repr)
+{
+ struct ice_dynamic_port *sf = repr->sf;
+ int err;
+
+ err = ice_devlink_create_sf_port(sf);
+ if (err)
+ return err;
+
+ SET_NETDEV_DEVLINK_PORT(repr->netdev, &sf->devlink_port);
+ err = ice_repr_reg_netdev(repr->netdev, &ice_repr_sf_netdev_ops);
+ if (err)
+ goto err_netdev;
+
+ ice_repr_set_tx_topology(sf->vsi->back, priv_to_devlink(sf->vsi->back));
+
+ return 0;
+
+err_netdev:
+ ice_devlink_destroy_sf_port(sf);
+ return err;
+}
+
+/**
+ * ice_repr_create_sf - add representor for SF VSI
+ * @sf: SF to create port representor on
+ *
+ * Set the correct representor type for the SF and its function pointers.
+ *
+ * Return: created port representor on success, ERR_PTR() on failure
+ */
+struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf)
+{
+ struct ice_repr *repr = ice_repr_create(sf->vsi);
+
+ if (IS_ERR(repr))
+ return repr;
+
+ repr->type = ICE_REPR_TYPE_SF;
+ repr->sf = sf;
+ repr->ops.add = ice_repr_add_sf;
+ repr->ops.rem = ice_repr_rem_sf;
+ repr->ops.ready = ice_repr_ready_sf;
+
+ ether_addr_copy(repr->parent_mac, sf->hw_addr);
+
+ return repr;
}
struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id)
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index 488661b2900b..35bd93165e1e 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -15,19 +15,35 @@ struct ice_repr_pcpu_stats {
u64 tx_drops;
};
+enum ice_repr_type {
+ ICE_REPR_TYPE_VF,
+ ICE_REPR_TYPE_SF,
+};
+
struct ice_repr {
struct ice_vsi *src_vsi;
- struct ice_vf *vf;
struct net_device *netdev;
struct metadata_dst *dst;
struct ice_esw_br_port *br_port;
struct ice_repr_pcpu_stats __percpu *stats;
u32 id;
u8 parent_mac[ETH_ALEN];
+ enum ice_repr_type type;
+ union {
+ struct ice_vf *vf;
+ struct ice_dynamic_port *sf;
+ };
+ struct {
+ int (*add)(struct ice_repr *repr);
+ void (*rem)(struct ice_repr *repr);
+ int (*ready)(struct ice_repr *repr);
+ } ops;
};
-struct ice_repr *ice_repr_add_vf(struct ice_vf *vf);
-void ice_repr_rem_vf(struct ice_repr *repr);
+struct ice_repr *ice_repr_create_vf(struct ice_vf *vf);
+struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf);
+
+void ice_repr_destroy(struct ice_repr *repr);
void ice_repr_start_tx_queues(struct ice_repr *repr);
void ice_repr_stop_tx_queues(struct ice_repr *repr);
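The ops table added to struct ice_repr above is what lets the eswitch code treat VF and SF representors uniformly. A minimal caller sketch, assuming a representor already built by ice_repr_create_vf() or ice_repr_create_sf() (the helper name below is hypothetical, not part of this patch):

static int example_repr_attach(struct ice_repr *repr)
{
	int err;

	/* dispatches to ice_repr_add_vf() or ice_repr_add_sf() */
	err = repr->ops.add(repr);
	if (err)
		return err;

	/* type-specific readiness check (VF state vs. SF activation) */
	return repr->ops.ready(repr);
}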
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index ecf8f5d60292..6ca13c5dcb14 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -28,9 +28,8 @@ ice_sched_add_root_node(struct ice_port_info *pi,
if (!root)
return -ENOMEM;
- /* coverity[suspicious_sizeof] */
root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
- sizeof(*root), GFP_KERNEL);
+ sizeof(*root->children), GFP_KERNEL);
if (!root->children) {
devm_kfree(ice_hw_to_dev(hw), root);
return -ENOMEM;
@@ -186,10 +185,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
if (!node)
return -ENOMEM;
if (hw->max_children[layer]) {
- /* coverity[suspicious_sizeof] */
node->children = devm_kcalloc(ice_hw_to_dev(hw),
hw->max_children[layer],
- sizeof(*node), GFP_KERNEL);
+ sizeof(*node->children), GFP_KERNEL);
if (!node->children) {
devm_kfree(ice_hw_to_dev(hw), node);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
new file mode 100644
index 000000000000..75d7147e1c01
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Intel Corporation. */
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_txrx.h"
+#include "ice_fltr.h"
+#include "ice_sf_eth.h"
+#include "devlink/devlink_port.h"
+#include "devlink/devlink.h"
+
+static const struct net_device_ops ice_sf_netdev_ops = {
+ .ndo_open = ice_open,
+ .ndo_stop = ice_stop,
+ .ndo_start_xmit = ice_start_xmit,
+ .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
+ .ndo_change_mtu = ice_change_mtu,
+ .ndo_get_stats64 = ice_get_stats64,
+ .ndo_tx_timeout = ice_tx_timeout,
+ .ndo_bpf = ice_xdp,
+ .ndo_xdp_xmit = ice_xdp_xmit,
+ .ndo_xsk_wakeup = ice_xsk_wakeup,
+};
+
+/**
+ * ice_sf_cfg_netdev - Allocate, configure and register a netdev
+ * @dyn_port: subfunction associated with configured netdev
+ * @devlink_port: subfunction devlink port to be linked with netdev
+ *
+ * Return: 0 on success, negative value on failure
+ */
+static int ice_sf_cfg_netdev(struct ice_dynamic_port *dyn_port,
+ struct devlink_port *devlink_port)
+{
+ struct ice_vsi *vsi = dyn_port->vsi;
+ struct ice_netdev_priv *np;
+ struct net_device *netdev;
+ int err;
+
+ netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
+ vsi->alloc_rxq);
+ if (!netdev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
+ set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+ vsi->netdev = netdev;
+ np = netdev_priv(netdev);
+ np->vsi = vsi;
+
+ ice_set_netdev_features(netdev);
+
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_RX_SG;
+ netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
+
+ eth_hw_addr_set(netdev, dyn_port->hw_addr);
+ ether_addr_copy(netdev->perm_addr, dyn_port->hw_addr);
+ netdev->netdev_ops = &ice_sf_netdev_ops;
+ SET_NETDEV_DEVLINK_PORT(netdev, devlink_port);
+
+ err = register_netdev(netdev);
+ if (err) {
+ free_netdev(netdev);
+ vsi->netdev = NULL;
+ return err;
+ }
+ set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+}
+
+static void ice_sf_decfg_netdev(struct ice_vsi *vsi)
+{
+ unregister_netdev(vsi->netdev);
+ clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+}
+
+/**
+ * ice_sf_dev_probe - subfunction driver probe function
+ * @adev: pointer to the auxiliary device
+ * @id: pointer to the auxiliary_device id
+ *
+ * Configure VSI and netdev resources for the subfunction device.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int ice_sf_dev_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+ struct ice_dynamic_port *dyn_port = sf_dev->dyn_port;
+ struct ice_vsi *vsi = dyn_port->vsi;
+ struct ice_pf *pf = dyn_port->pf;
+ struct device *dev = &adev->dev;
+ struct ice_sf_priv *priv;
+ struct devlink *devlink;
+ int err;
+
+ vsi->type = ICE_VSI_SF;
+ vsi->port_info = pf->hw.port_info;
+ vsi->flags = ICE_VSI_FLAG_INIT;
+
+ priv = ice_allocate_sf(&adev->dev, pf);
+ if (IS_ERR(priv)) {
+ dev_err(dev, "Subfunction devlink alloc failed");
+ return PTR_ERR(priv);
+ }
+
+ priv->dev = sf_dev;
+ sf_dev->priv = priv;
+ devlink = priv_to_devlink(priv);
+
+ devl_lock(devlink);
+
+ err = ice_vsi_cfg(vsi);
+ if (err) {
+ dev_err(dev, "Subfunction vsi config failed");
+ goto err_free_devlink;
+ }
+ vsi->sf = dyn_port;
+
+ ice_eswitch_update_repr(&dyn_port->repr_id, vsi);
+
+ err = ice_devlink_create_sf_dev_port(sf_dev);
+ if (err) {
+ dev_err(dev, "Cannot add ice virtual devlink port for subfunction");
+ goto err_vsi_decfg;
+ }
+
+ err = ice_sf_cfg_netdev(dyn_port, &sf_dev->priv->devlink_port);
+ if (err) {
+ dev_err(dev, "Subfunction netdev config failed");
+ goto err_devlink_destroy;
+ }
+
+ err = devl_port_fn_devlink_set(&dyn_port->devlink_port, devlink);
+ if (err) {
+ dev_err(dev, "Can't link devlink instance to SF devlink port");
+ goto err_netdev_decfg;
+ }
+
+ ice_napi_add(vsi);
+
+ devl_register(devlink);
+ devl_unlock(devlink);
+
+ dyn_port->attached = true;
+
+ return 0;
+
+err_netdev_decfg:
+ ice_sf_decfg_netdev(vsi);
+err_devlink_destroy:
+ ice_devlink_destroy_sf_dev_port(sf_dev);
+err_vsi_decfg:
+ ice_vsi_decfg(vsi);
+err_free_devlink:
+ devl_unlock(devlink);
+ devlink_free(devlink);
+ return err;
+}
+
+/**
+ * ice_sf_dev_remove - subfunction driver remove function
+ * @adev: pointer to the auxiliary device
+ *
+ * Deinitialize VSI and netdev resources for the subfunction device.
+ */
+static void ice_sf_dev_remove(struct auxiliary_device *adev)
+{
+ struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+ struct ice_dynamic_port *dyn_port = sf_dev->dyn_port;
+ struct ice_vsi *vsi = dyn_port->vsi;
+ struct devlink *devlink;
+
+ devlink = priv_to_devlink(sf_dev->priv);
+ devl_lock(devlink);
+
+ ice_vsi_close(vsi);
+
+ ice_sf_decfg_netdev(vsi);
+ ice_devlink_destroy_sf_dev_port(sf_dev);
+ devl_unregister(devlink);
+ devl_unlock(devlink);
+ devlink_free(devlink);
+ ice_vsi_decfg(vsi);
+
+ dyn_port->attached = false;
+}
+
+static const struct auxiliary_device_id ice_sf_dev_id_table[] = {
+ { .name = "ice.sf", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(auxiliary, ice_sf_dev_id_table);
+
+static struct auxiliary_driver ice_sf_driver = {
+ .name = "sf",
+ .probe = ice_sf_dev_probe,
+ .remove = ice_sf_dev_remove,
+ .id_table = ice_sf_dev_id_table
+};
+
+static DEFINE_XARRAY_ALLOC1(ice_sf_aux_id);
+
+/**
+ * ice_sf_driver_register - Register the auxiliary subfunction driver
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_sf_driver_register(void)
+{
+ return auxiliary_driver_register(&ice_sf_driver);
+}
+
+/**
+ * ice_sf_driver_unregister - Unregister the auxiliary subfunction driver
+ */
+void ice_sf_driver_unregister(void)
+{
+ auxiliary_driver_unregister(&ice_sf_driver);
+}
+
+/**
+ * ice_sf_dev_release - Release device associated with auxiliary device
+ * @device: pointer to the device
+ *
+ * Since most of the code for subfunction deactivation is handled in
+ * the remove handler, this release callback only frees tracking resources.
+ */
+static void ice_sf_dev_release(struct device *device)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(device);
+ struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+
+ xa_erase(&ice_sf_aux_id, adev->id);
+ kfree(sf_dev);
+}
+
+/**
+ * ice_sf_eth_activate - Activate Ethernet subfunction port
+ * @dyn_port: the dynamic port instance for this subfunction
+ * @extack: extack for reporting error messages
+ *
+ * Activate the dynamic port as an Ethernet subfunction. Setup the netdev
+ * resources associated and initialize the auxiliary device.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int
+ice_sf_eth_activate(struct ice_dynamic_port *dyn_port,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = dyn_port->pf;
+ struct ice_sf_dev *sf_dev;
+ struct pci_dev *pdev;
+ int err;
+ u32 id;
+
+ err = xa_alloc(&ice_sf_aux_id, &id, NULL, xa_limit_32b,
+ GFP_KERNEL);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not allocate SF ID");
+ return err;
+ }
+
+ sf_dev = kzalloc(sizeof(*sf_dev), GFP_KERNEL);
+ if (!sf_dev) {
+ err = -ENOMEM;
+ NL_SET_ERR_MSG_MOD(extack, "Could not allocate SF memory");
+ goto xa_erase;
+ }
+ pdev = pf->pdev;
+
+ sf_dev->dyn_port = dyn_port;
+ sf_dev->adev.id = id;
+ sf_dev->adev.name = "sf";
+ sf_dev->adev.dev.release = ice_sf_dev_release;
+ sf_dev->adev.dev.parent = &pdev->dev;
+
+ err = auxiliary_device_init(&sf_dev->adev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to initialize SF device");
+ goto sf_dev_free;
+ }
+
+ err = auxiliary_device_add(&sf_dev->adev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to add SF device");
+ goto aux_dev_uninit;
+ }
+
+ dyn_port->sf_dev = sf_dev;
+
+ return 0;
+
+aux_dev_uninit:
+ auxiliary_device_uninit(&sf_dev->adev);
+sf_dev_free:
+ kfree(sf_dev);
+xa_erase:
+ xa_erase(&ice_sf_aux_id, id);
+
+ return err;
+}
+
+/**
+ * ice_sf_eth_deactivate - Deactivate Ethernet subfunction port
+ * @dyn_port: the dynamic port instance for this subfunction
+ *
+ * Deactivate the Ethernet subfunction, removing its auxiliary device and the
+ * associated resources.
+ */
+void ice_sf_eth_deactivate(struct ice_dynamic_port *dyn_port)
+{
+ struct ice_sf_dev *sf_dev = dyn_port->sf_dev;
+
+ auxiliary_device_delete(&sf_dev->adev);
+ auxiliary_device_uninit(&sf_dev->adev);
+}
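For context, activation and deactivation are expected to be driven from devlink's port-function state callback; a hedged sketch of that pairing (the callback wiring itself is outside this hunk, and the function name below is hypothetical):

static int example_port_fn_state_set(struct ice_dynamic_port *dyn_port,
				     enum devlink_port_fn_state state,
				     struct netlink_ext_ack *extack)
{
	if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
		return ice_sf_eth_activate(dyn_port, extack);

	ice_sf_eth_deactivate(dyn_port);
	return 0;
}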
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.h b/drivers/net/ethernet/intel/ice/ice_sf_eth.h
new file mode 100644
index 000000000000..c558cad0a183
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation. */
+
+#ifndef _ICE_SF_ETH_H_
+#define _ICE_SF_ETH_H_
+
+#include <linux/auxiliary_bus.h>
+#include "ice.h"
+
+struct ice_sf_dev {
+ struct auxiliary_device adev;
+ struct ice_dynamic_port *dyn_port;
+ struct ice_sf_priv *priv;
+};
+
+struct ice_sf_priv {
+ struct ice_sf_dev *dev;
+ struct devlink_port devlink_port;
+};
+
+static inline struct ice_sf_dev *
+ice_adev_to_sf_dev(struct auxiliary_device *adev)
+{
+ return container_of(adev, struct ice_sf_dev, adev);
+}
+
+int ice_sf_driver_register(void);
+void ice_sf_driver_unregister(void);
+
+int ice_sf_eth_activate(struct ice_dynamic_port *dyn_port,
+ struct netlink_ext_ack *extack);
+void ice_sf_eth_deactivate(struct ice_dynamic_port *dyn_port);
+#endif /* _ICE_SF_ETH_H_ */
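A likely registration pairing for the two driver hooks declared above, assuming the PF module's init/exit paths own the auxiliary driver lifetime (the call sites are outside this diff, and the function names are illustrative):

static int __init example_module_init(void)
{
	/* must succeed before any subfunction port can be activated */
	return ice_sf_driver_register();
}

static void __exit example_module_exit(void)
{
	ice_sf_driver_unregister();
}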
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c
new file mode 100644
index 000000000000..3d7e96721cf9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023, Intel Corporation. */
+
+#include "ice_vsi_vlan_ops.h"
+#include "ice_vsi_vlan_lib.h"
+#include "ice_vlan_mode.h"
+#include "ice.h"
+#include "ice_sf_vsi_vlan_ops.h"
+
+void ice_sf_vsi_init_vlan_ops(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+
+ if (ice_is_dvm_ena(&vsi->back->hw))
+ vlan_ops = &vsi->outer_vlan_ops;
+ else
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h
new file mode 100644
index 000000000000..8c44eafceea0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023, Intel Corporation. */
+
+#ifndef _ICE_SF_VSI_VLAN_OPS_H_
+#define _ICE_SF_VSI_VLAN_OPS_H_
+
+#include "ice_vsi_vlan_ops.h"
+
+struct ice_vsi;
+
+void ice_sf_vsi_init_vlan_ops(struct ice_vsi *vsi);
+
+#endif /* _ICE_SF_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 55ef33208456..91cb393f616f 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -175,7 +175,7 @@ void ice_free_vfs(struct ice_pf *pf)
ice_for_each_vf(pf, bkt, vf) {
mutex_lock(&vf->cfg_lock);
- ice_eswitch_detach(pf, vf);
+ ice_eswitch_detach_vf(pf, vf);
ice_dis_vf_qs(vf);
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
@@ -598,7 +598,7 @@ static int ice_start_vfs(struct ice_pf *pf)
goto teardown;
}
- retval = ice_eswitch_attach(pf, vf);
+ retval = ice_eswitch_attach_vf(pf, vf);
if (retval) {
dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d",
vf->vf_id, retval);
@@ -1096,8 +1096,10 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
return -ENOENT;
vsi = ice_get_vf_vsi(vf);
- if (!vsi)
+ if (!vsi) {
+ ice_put_vf(vf);
return -ENOENT;
+ }
prev_msix = vf->num_msix;
prev_queues = vf->num_vf_qs;
@@ -1119,7 +1121,10 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (vf->first_vector_idx < 0)
goto unroll;
- if (ice_vf_reconfig_vsi(vf) || ice_vf_init_host_cfg(vf, vsi)) {
+ vsi->req_txq = queues;
+ vsi->req_rxq = queues;
+
+ if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
/* Try to rebuild with previous values */
needs_rebuild = true;
goto unroll;
@@ -1142,12 +1147,16 @@ unroll:
vf->num_msix = prev_msix;
vf->num_vf_qs = prev_queues;
vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
- if (vf->first_vector_idx < 0)
+ if (vf->first_vector_idx < 0) {
+ ice_put_vf(vf);
return -EINVAL;
+ }
if (needs_rebuild) {
- ice_vf_reconfig_vsi(vf);
- ice_vf_init_host_cfg(vf, vsi);
+ vsi->req_txq = prev_queues;
+ vsi->req_rxq = prev_queues;
+
+ ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
}
ice_ena_vf_mappings(vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index fe8847184cb1..0e740342e294 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -3194,7 +3194,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
- return 0;
+ return -EEXIST;
/* Update the previously created VSI list set with
* the new VSI ID passed in
@@ -3264,7 +3264,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
list_head = &sw->recp_list[recp_id].filt_rules;
list_for_each_entry(list_itr, list_head, list_entry) {
- if (list_itr->vsi_list_info) {
+ if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
map_info = list_itr->vsi_list_info;
if (test_bit(vsi_handle, map_info->vsi_map)) {
*vsi_list_id = map_info->vsi_list_id;
@@ -6322,8 +6322,6 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
if (!itr->vsi_list_info ||
!test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
continue;
- /* Clearing it so that the logic can add it back */
- clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
f_entry.fltr_info.vsi_handle = vsi_handle;
f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* update the src in case it is VSI num */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index e6923f8121a9..ea39b999a0d0 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -819,6 +819,17 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_info.sw_act.flag |= ICE_FLTR_TX;
rule_info.sw_act.src = vsi->idx;
rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+ /* This is a specific case. The destination VSI index is
+ * overwritten by the source VSI index. This type of filter
+ * should allow the packet to go to the LAN, not to the VSI
+ * passed here, so only the LAN_EN bit should be set. However,
+ * the VSI index must be a valid one, which makes using the
+ * source VSI index here safe. Even if the switch result sets
+ * both LAN_EN and LB_EN (which would normally loop the packet
+ * back to this VSI), the packet won't be seen on the VSI,
+ * because local loopback is turned off.
+ */
+ rule_info.sw_act.vsi_handle = vsi->idx;
} else {
/* VF to VF */
rule_info.sw_act.flag |= ICE_FLTR_TX;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 8d25b6981269..8208055d6e7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -522,30 +522,6 @@ err:
}
/**
- * ice_rx_frame_truesize
- * @rx_ring: ptr to Rx ring
- * @size: size
- *
- * calculate the truesize with taking into the account PAGE_SIZE of
- * underlying arch
- */
-static unsigned int
-ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
-{
- unsigned int truesize;
-
-#if (PAGE_SIZE < 8192)
- truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
-#else
- truesize = rx_ring->rx_offset ?
- SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
- SKB_DATA_ALIGN(size);
-#endif
- return truesize;
-}
-
-/**
* ice_run_xdp - Executes an XDP program on initialized xdp_buff
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
@@ -837,16 +813,15 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
if (!dev_page_is_reusable(page))
return false;
-#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
return false;
-#else
+#if (PAGE_SIZE >= 8192)
#define ICE_LAST_OFFSET \
- (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072)
if (rx_buf->page_offset > ICE_LAST_OFFSET)
return false;
-#endif /* PAGE_SIZE < 8192) */
+#endif /* PAGE_SIZE >= 8192 */
/* If we have drained the page fragment pool we need to update
* the pagecnt_bias and page count so that we fully restock the
@@ -949,12 +924,7 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
struct ice_rx_buf *rx_buf;
rx_buf = &rx_ring->rx_buf[ntc];
- rx_buf->pgcnt =
-#if (PAGE_SIZE < 8192)
- page_count(rx_buf->page);
-#else
- 0;
-#endif
+ rx_buf->pgcnt = page_count(rx_buf->page);
prefetchw(rx_buf->page);
if (!size)
@@ -1160,11 +1130,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
bool failure;
u32 first;
- /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
-#if (PAGE_SIZE < 8192)
- xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0);
-#endif
-
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (xdp_prog) {
xdp_ring = rx_ring->xdp_ring;
@@ -1223,10 +1188,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
offset;
xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
-#if (PAGE_SIZE > 4096)
- /* At larger PAGE_SIZE, frame_sz depend on len size */
- xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size);
-#endif
xdp_buff_clear_frags_flag(xdp);
} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
break;
@@ -2407,7 +2368,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
- if (ice_is_switchdev_running(vsi->back))
+ if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF)
ice_eswitch_set_target_vsi(skb, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 96037bef3e78..45768796691f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -61,6 +61,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
ICE_DBG_AQ_DESC | \
ICE_DBG_AQ_DESC_BUF | \
ICE_DBG_AQ_CMD)
+#define ICE_DBG_PARSER BIT_ULL(28)
#define ICE_DBG_USER BIT_ULL(31)
@@ -158,6 +159,7 @@ enum ice_vsi_type {
ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */
ICE_VSI_CHNL = 4,
ICE_VSI_LB = 6,
+ ICE_VSI_SF = 9,
};
struct ice_link_status {
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 5635e9da2212..8c434689e3f7 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -256,7 +256,7 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
*
* It brings the VSI down and then reconfigures it with the hardware.
*/
-int ice_vf_reconfig_vsi(struct ice_vf *vf)
+static int ice_vf_reconfig_vsi(struct ice_vf *vf)
{
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
struct ice_pf *pf = vf->pf;
@@ -335,6 +335,13 @@ static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
} else {
+ /* clear possible previous port vlan config */
+ err = ice_vsi_clear_port_vlan(vsi);
+ if (err) {
+ dev_err(dev, "failed to clear port VLAN via VSI parameters for VF %u, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
err = ice_vsi_add_vlan_zero(vsi);
}
@@ -766,7 +773,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_for_each_vf(pf, bkt, vf) {
mutex_lock(&vf->cfg_lock);
- ice_eswitch_detach(pf, vf);
+ ice_eswitch_detach_vf(pf, vf);
vf->driver_caps = 0;
ice_vc_set_default_allowlist(vf);
@@ -782,7 +789,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_vf_rebuild_vsi(vf);
ice_vf_post_vsi_rebuild(vf);
- ice_eswitch_attach(pf, vf);
+ ice_eswitch_attach_vf(pf, vf);
mutex_unlock(&vf->cfg_lock);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index fec16919ec19..be4266899690 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -12,6 +12,7 @@
#include <net/devlink.h>
#include <linux/avf/virtchnl.h>
#include "ice_type.h"
+#include "ice_flow.h"
#include "ice_virtchnl_fdir.h"
#include "ice_vsi_vlan_ops.h"
@@ -52,6 +53,12 @@ struct ice_mdd_vf_events {
u16 last_printed;
};
+/* Structure to store FDIR fv entry */
+struct ice_fdir_prof_info {
+ struct ice_parser_profile prof;
+ u64 fdir_active_cnt;
+};
+
/* VF operations */
struct ice_vf_ops {
enum ice_disq_rst_src reset_type;
@@ -91,6 +98,7 @@ struct ice_vf {
u16 lan_vsi_idx; /* index into PF struct */
u16 ctrl_vsi_idx;
struct ice_vf_fdir fdir;
+ struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
/* first vector index of this VF in the PF space */
int first_vector_idx;
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 91ba7fe0eaee..0c7e77c0a09f 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -23,7 +23,6 @@
#warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files"
#endif
-int ice_vf_reconfig_vsi(struct ice_vf *vf);
void ice_initialize_vf_entry(struct ice_vf *vf);
void ice_dis_vf_qs(struct ice_vf *vf);
int ice_check_vf_init(struct ice_vf *vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 1c6ce0c4ed4e..59f62306b9cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -461,6 +461,10 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_TC_U32 &&
+ vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_TC_U32;
+
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index b4feb0927687..14e3f0f89c78 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -26,6 +26,15 @@ enum ice_fdir_tunnel_type {
ICE_FDIR_TUNNEL_TYPE_NONE = 0,
ICE_FDIR_TUNNEL_TYPE_GTPU,
ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
+ ICE_FDIR_TUNNEL_TYPE_ECPRI,
+ ICE_FDIR_TUNNEL_TYPE_GTPU_INNER,
+ ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER,
+ ICE_FDIR_TUNNEL_TYPE_GRE,
+ ICE_FDIR_TUNNEL_TYPE_GTPOGRE,
+ ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER,
+ ICE_FDIR_TUNNEL_TYPE_GRE_INNER,
+ ICE_FDIR_TUNNEL_TYPE_L2TPV2,
+ ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER,
};
struct virtchnl_fdir_fltr_conf {
@@ -33,6 +42,11 @@ struct virtchnl_fdir_fltr_conf {
enum ice_fdir_tunnel_type ttype;
u64 inset_flag;
u32 flow_id;
+
+ struct ice_parser_profile *prof;
+ bool parser_ena;
+ u8 *pkt_buf;
+ u16 pkt_len;
};
struct virtchnl_fdir_inset_map {
@@ -787,6 +801,107 @@ err_exit:
}
/**
+ * ice_vc_fdir_is_raw_flow - check if FDIR flow is raw (binary)
+ * @proto: virtchnl protocol headers
+ *
+ * Check if the FDIR rule is a raw (protocol-agnostic) flow. Note that a
+ * common FDIR rule must have a non-zero proto->count, so the tunnel_level
+ * and count fields of @proto serve as the indicators: if both are zero,
+ * the rule is treated as a raw flow.
+ *
+ * Return: true if the headers describe a raw flow, false otherwise.
+ */
+static bool
+ice_vc_fdir_is_raw_flow(struct virtchnl_proto_hdrs *proto)
+{
+ return (proto->tunnel_level == 0 && proto->count == 0);
+}
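For illustration, this is roughly what a VF-side raw rule looks like on the wire: tunnel_level and count stay zero, and the packet bytes plus mask go into proto->raw (the raw field names are those used by this patch; the helper itself is hypothetical):

static void example_fill_raw_proto(struct virtchnl_proto_hdrs *proto,
				   const u8 *spec, const u8 *mask, u16 len)
{
	proto->tunnel_level = 0;
	proto->count = 0;		/* zero count marks a raw flow */
	proto->raw.pkt_len = len;
	memcpy(proto->raw.spec, spec, len);
	memcpy(proto->raw.mask, mask, len);
}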
+
+/**
+ * ice_vc_fdir_parse_raw - parse a virtchnl raw FDIR rule
+ * @vf: pointer to the VF info
+ * @proto: virtchnl protocol headers
+ * @conf: FDIR configuration for each filter
+ *
+ * Parse the virtual channel filter's raw flow and store it in @conf
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int
+ice_vc_fdir_parse_raw(struct ice_vf *vf,
+ struct virtchnl_proto_hdrs *proto,
+ struct virtchnl_fdir_fltr_conf *conf)
+{
+ u8 *pkt_buf, *msk_buf __free(kfree);
+ struct ice_parser_result rslt;
+ struct ice_pf *pf = vf->pf;
+ struct ice_parser *psr;
+ int status = -ENOMEM;
+ struct ice_hw *hw;
+ u16 udp_port = 0;
+
+ pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
+ msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
+ if (!pkt_buf || !msk_buf)
+ goto err_mem_alloc;
+
+ memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len);
+ memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len);
+
+ hw = &pf->hw;
+
+ /* Get raw profile info via Parser Lib */
+ psr = ice_parser_create(hw);
+ if (IS_ERR(psr)) {
+ status = PTR_ERR(psr);
+ goto err_mem_alloc;
+ }
+
+ ice_parser_dvm_set(psr, ice_is_dvm_ena(hw));
+
+ if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN))
+ ice_parser_vxlan_tunnel_set(psr, udp_port, true);
+
+ status = ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt);
+ if (status)
+ goto err_parser_destroy;
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_parser_result_dump(hw, &rslt);
+
+ conf->prof = kzalloc(sizeof(*conf->prof), GFP_KERNEL);
+ if (!conf->prof) {
+ status = -ENOMEM;
+ goto err_parser_destroy;
+ }
+
+ status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
+ proto->raw.pkt_len, ICE_BLK_FD,
+ conf->prof);
+ if (status)
+ goto err_parser_profile_init;
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_parser_profile_dump(hw, conf->prof);
+
+ /* Store raw flow info into @conf */
+ conf->pkt_len = proto->raw.pkt_len;
+ conf->pkt_buf = pkt_buf;
+ conf->parser_ena = true;
+
+ ice_parser_destroy(psr);
+ return 0;
+
+err_parser_profile_init:
+ kfree(conf->prof);
+err_parser_destroy:
+ ice_parser_destroy(psr);
+err_mem_alloc:
+ kfree(pkt_buf);
+ return status;
+}
+
+/**
* ice_vc_fdir_parse_pattern
* @vf: pointer to the VF info
* @fltr: virtual channel add cmd buffer
@@ -813,6 +928,10 @@ ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
return -EINVAL;
}
+ /* For raw FDIR filters created by the parser */
+ if (ice_vc_fdir_is_raw_flow(proto))
+ return ice_vc_fdir_parse_raw(vf, proto, conf);
+
for (i = 0; i < proto->count; i++) {
struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
struct ip_esp_hdr *esph;
@@ -1101,8 +1220,10 @@ ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
int ret;
- if (!ice_vc_validate_pattern(vf, proto))
- return -EINVAL;
+ /* For raw FDIR filters created by the parser */
+ if (!ice_vc_fdir_is_raw_flow(proto))
+ if (!ice_vc_validate_pattern(vf, proto))
+ return -EINVAL;
ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
if (ret)
@@ -1295,11 +1416,15 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
return -ENOMEM;
ice_fdir_get_prgm_desc(hw, input, &desc, add);
- ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
- if (ret) {
- dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
- vf->vf_id, input->flow_type);
- goto err_free_pkt;
+ if (conf->parser_ena) {
+ memcpy(pkt, conf->pkt_buf, conf->pkt_len);
+ } else {
+ ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
+ if (ret) {
+ dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
+ vf->vf_id, input->flow_type);
+ goto err_free_pkt;
+ }
}
ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
@@ -1521,6 +1646,16 @@ err_exit:
return ret;
}
+static int ice_fdir_is_tunnel(enum ice_fdir_tunnel_type ttype)
+{
+ return (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_ECPRI ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER);
+}
+
/**
* ice_vc_add_fdir_fltr_post
* @vf: pointer to the VF structure
@@ -1782,6 +1917,158 @@ static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
}
/**
+ * ice_vc_parser_fv_check_diff - check two parsed FDIR profile fv contexts
+ * @fv_a: struct of parsed FDIR profile field vector
+ * @fv_b: struct of parsed FDIR profile field vector
+ *
+ * Check if the two parsed FDIR profile field vector contexts differ,
+ * including proto_id, offset and mask.
+ *
+ * Return: true if they differ, false otherwise.
+ */
+static bool ice_vc_parser_fv_check_diff(struct ice_parser_fv *fv_a,
+ struct ice_parser_fv *fv_b)
+{
+ return (fv_a->proto_id != fv_b->proto_id ||
+ fv_a->offset != fv_b->offset ||
+ fv_a->msk != fv_b->msk);
+}
+
+/**
+ * ice_vc_parser_fv_save - save parsed FDIR profile fv context
+ * @fv: struct of parsed FDIR profile field vector
+ * @fv_src: parsed FDIR profile field vector context to save
+ *
+ * Save the parsed FDIR profile field vector context, including proto_id,
+ * offset and mask.
+ *
+ * Return: Void.
+ */
+static void ice_vc_parser_fv_save(struct ice_parser_fv *fv,
+ struct ice_parser_fv *fv_src)
+{
+ fv->proto_id = fv_src->proto_id;
+ fv->offset = fv_src->offset;
+ fv->msk = fv_src->msk;
+ fv->spec = 0;
+}
+
+/**
+ * ice_vc_add_fdir_raw - add a raw FDIR filter for VF
+ * @vf: pointer to the VF info
+ * @conf: FDIR configuration for each filter
+ * @v_ret: the final VIRTCHNL code
+ * @stat: pointer to the VIRTCHNL_OP_ADD_FDIR_FILTER
+ * @len: length of the stat
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int
+ice_vc_add_fdir_raw(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf,
+ enum virtchnl_status_code *v_ret,
+ struct virtchnl_fdir_add *stat, int len)
+{
+ struct ice_vsi *vf_vsi, *ctrl_vsi;
+ struct ice_fdir_prof_info *pi;
+ struct ice_pf *pf = vf->pf;
+ int ret, ptg, id, i;
+ struct device *dev;
+ struct ice_hw *hw;
+ bool fv_found;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ *v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+
+ id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
+ ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
+
+ vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ dev_err(dev, "Can not get FDIR vf_vsi for VF %d\n", vf->vf_id);
+ return -ENODEV;
+ }
+
+ ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
+ if (!ctrl_vsi) {
+ dev_err(dev, "Can not get FDIR ctrl_vsi for VF %d\n",
+ vf->vf_id);
+ return -ENODEV;
+ }
+
+ fv_found = false;
+
+ /* Check if profile info already exists, then update the counter */
+ pi = &vf->fdir_prof_info[ptg];
+ if (pi->fdir_active_cnt != 0) {
+ for (i = 0; i < ICE_MAX_FV_WORDS; i++)
+ if (ice_vc_parser_fv_check_diff(&pi->prof.fv[i],
+ &conf->prof->fv[i]))
+ break;
+ if (i == ICE_MAX_FV_WORDS) {
+ fv_found = true;
+ pi->fdir_active_cnt++;
+ }
+ }
+
+ /* HW profile setting is only required for the first time */
+ if (!fv_found) {
+ ret = ice_flow_set_parser_prof(hw, vf_vsi->idx,
+ ctrl_vsi->idx, conf->prof,
+ ICE_BLK_FD);
+
+ if (ret) {
+ *v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "VF %d: insert hw prof failed\n",
+ vf->vf_id);
+ return ret;
+ }
+ }
+
+ ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
+ if (ret) {
+ *v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "VF %d: insert FDIR list failed\n",
+ vf->vf_id);
+ return ret;
+ }
+
+ ret = ice_vc_fdir_set_irq_ctx(vf, conf,
+ VIRTCHNL_OP_ADD_FDIR_FILTER);
+ if (ret) {
+ dev_dbg(dev, "VF %d: set FDIR context failed\n",
+ vf->vf_id);
+ goto err_rem_entry;
+ }
+
+ ret = ice_vc_fdir_write_fltr(vf, conf, true, false);
+ if (ret) {
+ dev_err(dev, "VF %d: adding FDIR raw flow rule failed, ret:%d\n",
+ vf->vf_id, ret);
+ goto err_clr_irq;
+ }
+
+ /* Save parsed profile fv info of the FDIR rule for the first time */
+ if (!fv_found) {
+ for (i = 0; i < conf->prof->fv_num; i++)
+ ice_vc_parser_fv_save(&pi->prof.fv[i],
+ &conf->prof->fv[i]);
+ pi->prof.fv_num = conf->prof->fv_num;
+ pi->fdir_active_cnt = 1;
+ }
+
+ return 0;
+
+err_clr_irq:
+ ice_vc_fdir_clear_irq_ctx(vf);
+err_rem_entry:
+ ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
+ return ret;
+}
+
+/**
* ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1846,7 +2133,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
len = sizeof(*stat);
ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
if (ret) {
- v_ret = VIRTCHNL_STATUS_SUCCESS;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
goto err_free_conf;
@@ -1861,6 +2148,15 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
goto exit;
}
+ /* For raw FDIR filters created by the parser */
+ if (conf->parser_ena) {
+ ret = ice_vc_add_fdir_raw(vf, conf, &v_ret, stat, len);
+ if (ret)
+ goto err_free_conf;
+ goto exit;
+ }
+
+ is_tun = ice_fdir_is_tunnel(conf->ttype);
ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
@@ -1922,6 +2218,78 @@ err_exit:
}
/**
+ * ice_vc_del_fdir_raw - delete a raw FDIR filter for VF
+ * @vf: pointer to the VF info
+ * @conf: FDIR configuration for each filter
+ * @v_ret: the final VIRTCHNL code
+ * @stat: pointer to the VIRTCHNL_OP_DEL_FDIR_FILTER
+ * @len: length of the stat
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int
+ice_vc_del_fdir_raw(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf,
+ enum virtchnl_status_code *v_ret,
+ struct virtchnl_fdir_del *stat, int len)
+{
+ struct ice_vsi *vf_vsi, *ctrl_vsi;
+ enum ice_block blk = ICE_BLK_FD;
+ struct ice_fdir_prof_info *pi;
+ struct ice_pf *pf = vf->pf;
+ struct device *dev;
+ struct ice_hw *hw;
+ unsigned long id;
+ u16 vsi_num;
+ int ptg;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ *v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+
+ id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
+ ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
+
+ ret = ice_vc_fdir_write_fltr(vf, conf, false, false);
+ if (ret) {
+ dev_err(dev, "VF %u: deleting FDIR raw flow rule failed: %d\n",
+ vf->vf_id, ret);
+ return ret;
+ }
+
+ vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
+ return -ENODEV;
+ }
+
+ ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
+ if (!ctrl_vsi) {
+ dev_err(dev, "Can not get FDIR ctrl_vsi for VF %u\n",
+ vf->vf_id);
+ return -ENODEV;
+ }
+
+ pi = &vf->fdir_prof_info[ptg];
+ if (pi->fdir_active_cnt != 0) {
+ pi->fdir_active_cnt--;
+ /* Remove the profile id flow if no active FDIR rule left */
+ if (!pi->fdir_active_cnt) {
+ vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi->idx);
+ ice_rem_prof_id_flow(hw, blk, vsi_num, id);
+
+ vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
+ ice_rem_prof_id_flow(hw, blk, vsi_num, id);
+ }
+ }
+
+ conf->parser_ena = false;
+ return 0;
+}
+
+/**
* ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1933,7 +2301,10 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
struct virtchnl_fdir_del *stat = NULL;
struct virtchnl_fdir_fltr_conf *conf;
+ struct ice_vf_fdir *fdir = &vf->fdir;
enum virtchnl_status_code v_ret;
+ struct ice_fdir_fltr *input;
+ enum ice_fltr_ptype flow;
struct device *dev;
struct ice_pf *pf;
int is_tun = 0;
@@ -1983,6 +2354,15 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
goto err_exit;
}
+ /* For raw FDIR filters created by the parser */
+ if (conf->parser_ena) {
+ ret = ice_vc_del_fdir_raw(vf, conf, &v_ret, stat, len);
+ if (ret)
+ goto err_del_tmr;
+ goto exit;
+ }
+
+ is_tun = ice_fdir_is_tunnel(conf->ttype);
ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
@@ -1992,6 +2372,13 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
goto err_del_tmr;
}
+ /* Remove unused profiles to avoid unexpected behaviors */
+ input = &conf->input;
+ flow = input->flow_type;
+ if (fdir->fdir_fltr_cnt[flow][is_tun] == 1)
+ ice_vc_fdir_rem_prof(vf, flow, is_tun);
+
+exit:
kfree(stat);
return ret;
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
index 6e8f2aab6080..5291f2888ef8 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -787,3 +787,60 @@ int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi)
kfree(ctxt);
return err;
}
+
+int ice_vsi_clear_port_vlan(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
+
+ ctxt->info.port_based_outer_vlan = 0;
+ ctxt->info.port_based_inner_vlan = 0;
+
+ ctxt->info.inner_vlan_flags =
+ FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M,
+ ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL);
+ if (ice_is_dvm_ena(hw)) {
+ ctxt->info.inner_vlan_flags |=
+ FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
+ ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
+ ctxt->info.outer_vlan_flags =
+ FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL);
+ ctxt->info.outer_vlan_flags |=
+ FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M,
+ ICE_AQ_VSI_OUTER_TAG_VLAN_8100);
+ ctxt->info.outer_vlan_flags |=
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING <<
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_S;
+ }
+
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID |
+ ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing port based VLAN failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ } else {
+ vsi->info.port_based_outer_vlan =
+ ctxt->info.port_based_outer_vlan;
+ vsi->info.port_based_inner_vlan =
+ ctxt->info.port_based_inner_vlan;
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+ vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+ }
+
+ kfree(ctxt);
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
index f0d84d11bd5b..12b227621a7d 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
@@ -36,5 +36,6 @@ int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid);
int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi);
int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi);
+int ice_vsi_clear_port_vlan(struct ice_vsi *vsi);
#endif /* _ICE_VSI_VLAN_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
index 7aae7fdcfcdb..8c7a9b41fb63 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
@@ -3,6 +3,7 @@
#include "ice_pf_vsi_vlan_ops.h"
#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_sf_vsi_vlan_ops.h"
#include "ice_lib.h"
#include "ice.h"
@@ -77,6 +78,9 @@ void ice_vsi_init_vlan_ops(struct ice_vsi *vsi)
case ICE_VSI_VF:
ice_vf_vsi_init_vlan_ops(vsi);
break;
+ case ICE_VSI_SF:
+ ice_sf_vsi_init_vlan_ops(vsi);
+ break;
default:
dev_dbg(ice_pf_to_dev(vsi->back), "%s does not support VLAN operations\n",
ice_vsi_type_str(vsi->type));
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 240a7bec242b..334ae945d640 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -39,7 +39,7 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
- if (ice_is_xdp_ena_vsi(vsi))
+ if (vsi->xdp_rings)
memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
}
@@ -52,7 +52,7 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
ice_clean_tx_ring(vsi->tx_rings[q_idx]);
- if (ice_is_xdp_ena_vsi(vsi))
+ if (vsi->xdp_rings)
ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}
@@ -165,7 +165,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
struct ice_q_vector *q_vector;
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
- int timeout = 50;
int fail = 0;
int err;
@@ -176,13 +175,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
rx_ring = vsi->rx_rings[q_idx];
q_vector = rx_ring->q_vector;
- while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
- timeout--;
- if (!timeout)
- return -EBUSY;
- usleep_range(1000, 2000);
- }
-
synchronize_net();
netif_carrier_off(vsi->netdev);
netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
@@ -194,7 +186,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
if (!fail)
fail = err;
- if (ice_is_xdp_ena_vsi(vsi)) {
+ if (vsi->xdp_rings) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
memset(&txq_meta, 0, sizeof(txq_meta));
@@ -261,7 +253,6 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
netif_carrier_on(vsi->netdev);
}
- clear_bit(ICE_CFG_BUSY, vsi->state);
return fail;
}
@@ -298,7 +289,7 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
int err;
- if (vsi->type != ICE_VSI_PF)
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF)
return -EINVAL;
if (qid >= vsi->netdev->real_num_rx_queues ||
@@ -390,7 +381,8 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
goto failure;
}
- if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
+ if_running = !test_bit(ICE_VSI_DOWN, vsi->state) &&
+ ice_is_xdp_ena_vsi(vsi);
if (if_running) {
struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 3df9935685e9..6c913a703df6 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -97,8 +97,10 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
intr->dyn_ctl = idpf_get_reg_addr(adapter,
reg_vals[vec_id].dyn_ctl_reg);
intr->dyn_ctl_intena_m = PF_GLINT_DYN_CTL_INTENA_M;
+ intr->dyn_ctl_intena_msk_m = PF_GLINT_DYN_CTL_INTENA_MSK_M;
intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S;
intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S;
+ intr->dyn_ctl_wb_on_itr_m = PF_GLINT_DYN_CTL_WB_ON_ITR_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
IDPF_PF_ITR_IDX_SPACING);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 0b6c8fd5bc90..4f20343e49a9 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -357,24 +357,11 @@ int idpf_intr_req(struct idpf_adapter *adapter)
goto free_msix;
}
- if (adapter->req_vec_chunks) {
- struct virtchnl2_vector_chunks *vchunks;
- struct virtchnl2_alloc_vectors *ac;
-
- ac = adapter->req_vec_chunks;
- vchunks = &ac->vchunks;
-
- num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
- vchunks);
- if (num_vec_ids < v_actual) {
- err = -EINVAL;
- goto free_vecids;
- }
- } else {
- int i;
-
- for (i = 0; i < v_actual; i++)
- vecids[i] = i;
+ num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
+ &adapter->req_vec_chunks->vchunks);
+ if (num_vec_ids < v_actual) {
+ err = -EINVAL;
+ goto free_vecids;
}
for (vector = 0; vector < v_actual; vector++) {
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index fe64febf7436..dfd7cf1d9aa0 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2023 Intel Corporation */
#include <net/libeth/rx.h>
+#include <net/libeth/tx.h>
#include "idpf.h"
@@ -224,6 +225,7 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
dma_unmap_addr_set(tx_buf, dma, dma);
+ tx_buf->type = LIBETH_SQE_FRAG;
/* align size to end of page */
max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
@@ -237,14 +239,17 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
offsets,
max_data,
td_tag);
- tx_desc++;
- i++;
-
- if (i == tx_q->desc_count) {
+ if (unlikely(++i == tx_q->desc_count)) {
+ tx_buf = &tx_q->tx_buf[0];
tx_desc = &tx_q->base_tx[0];
i = 0;
+ } else {
+ tx_buf++;
+ tx_desc++;
}
+ tx_buf->type = LIBETH_SQE_EMPTY;
+
dma += max_data;
size -= max_data;
@@ -257,12 +262,14 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
size, td_tag);
- tx_desc++;
- i++;
- if (i == tx_q->desc_count) {
+ if (unlikely(++i == tx_q->desc_count)) {
+ tx_buf = &tx_q->tx_buf[0];
tx_desc = &tx_q->base_tx[0];
i = 0;
+ } else {
+ tx_buf++;
+ tx_desc++;
}
size = skb_frag_size(frag);
@@ -270,8 +277,6 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
DMA_TO_DEVICE);
-
- tx_buf = &tx_q->tx_buf[i];
}
skb_tx_timestamp(first->skb);
@@ -282,13 +287,13 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
size, td_tag);
- IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
+ first->type = LIBETH_SQE_SKB;
+ first->rs_idx = i;
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
+ IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- netdev_tx_sent_queue(nq, first->bytecount);
+ netdev_tx_sent_queue(nq, first->bytes);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
}
@@ -306,8 +311,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq)
struct idpf_base_tx_ctx_desc *ctx_desc;
int ntu = txq->next_to_use;
- memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
- txq->tx_buf[ntu].ctx_entry = true;
+ txq->tx_buf[ntu].type = LIBETH_SQE_CTX;
ctx_desc = &txq->base_ctx[ntu];
@@ -371,6 +375,10 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
IDPF_TX_DESCS_FOR_CTX)) {
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.q_busy);
+ u64_stats_update_end(&tx_q->stats_sync);
+
return NETDEV_TX_BUSY;
}
@@ -396,11 +404,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
first->skb = skb;
if (tso) {
- first->gso_segs = offload.tso_segs;
- first->bytecount = skb->len + ((first->gso_segs - 1) * offload.tso_hdr_len);
+ first->packets = offload.tso_segs;
+ first->bytes = skb->len + ((first->packets - 1) * offload.tso_hdr_len);
} else {
- first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
- first->gso_segs = 1;
+ first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ first->packets = 1;
}
idpf_tx_singleq_map(tx_q, first, &offload);
@@ -420,10 +428,15 @@ out_drop:
static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
int *cleaned)
{
- unsigned int total_bytes = 0, total_pkts = 0;
+ struct libeth_sq_napi_stats ss = { };
struct idpf_base_tx_desc *tx_desc;
u32 budget = tx_q->clean_budget;
s16 ntc = tx_q->next_to_clean;
+ struct libeth_cq_pp cp = {
+ .dev = tx_q->dev,
+ .ss = &ss,
+ .napi = napi_budget,
+ };
struct idpf_netdev_priv *np;
struct idpf_tx_buf *tx_buf;
struct netdev_queue *nq;
@@ -441,47 +454,26 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
* such. We can skip this descriptor since there is no buffer
* to clean.
*/
- if (tx_buf->ctx_entry) {
- /* Clear this flag here to avoid stale flag values when
- * this buffer is used for actual data in the future.
- * There are cases where the tx_buf struct / the flags
- * field will not be cleared before being reused.
- */
- tx_buf->ctx_entry = false;
+ if (unlikely(tx_buf->type <= LIBETH_SQE_CTX)) {
+ tx_buf->type = LIBETH_SQE_EMPTY;
goto fetch_next_txq_desc;
}
- /* if next_to_watch is not set then no work pending */
- eop_desc = (struct idpf_base_tx_desc *)tx_buf->next_to_watch;
- if (!eop_desc)
+ if (unlikely(tx_buf->type != LIBETH_SQE_SKB))
break;
- /* prevent any other reads prior to eop_desc */
+ /* prevent any other reads prior to type */
smp_rmb();
+ eop_desc = &tx_q->base_tx[tx_buf->rs_idx];
+
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->qw1 &
cpu_to_le64(IDPF_TX_DESC_DTYPE_DESC_DONE)))
break;
- /* clear next_to_watch to prevent false hangs */
- tx_buf->next_to_watch = NULL;
-
/* update the statistics for this packet */
- total_bytes += tx_buf->bytecount;
- total_pkts += tx_buf->gso_segs;
-
- napi_consume_skb(tx_buf->skb, napi_budget);
-
- /* unmap skb header data */
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
-
- /* clear tx_buf data */
- tx_buf->skb = NULL;
- dma_unmap_len_set(tx_buf, len, 0);
+ libeth_tx_complete(tx_buf, &cp);
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
@@ -495,13 +487,7 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
}
/* unmap any remaining paged data */
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
+ libeth_tx_complete(tx_buf, &cp);
}
/* update budget only if we did something */
@@ -521,11 +507,11 @@ fetch_next_txq_desc:
ntc += tx_q->desc_count;
tx_q->next_to_clean = ntc;
- *cleaned += total_pkts;
+ *cleaned += ss.packets;
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_add(&tx_q->q_stats.packets, total_pkts);
- u64_stats_add(&tx_q->q_stats.bytes, total_bytes);
+ u64_stats_add(&tx_q->q_stats.packets, ss.packets);
+ u64_stats_add(&tx_q->q_stats.bytes, ss.bytes);
u64_stats_update_end(&tx_q->stats_sync);
np = netdev_priv(tx_q->netdev);
@@ -533,7 +519,7 @@ fetch_next_txq_desc:
dont_wake = np->state != __IDPF_VPORT_UP ||
!netif_carrier_ok(tx_q->netdev);
- __netif_txq_completed_wake(nq, total_pkts, total_bytes,
+ __netif_txq_completed_wake(nq, ss.packets, ss.bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
dont_wake);
@@ -1134,8 +1120,10 @@ int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget)
&work_done);
/* If work not completed, return budget and polling will return */
- if (!clean_complete)
+ if (!clean_complete) {
+ idpf_vport_intr_set_wb_on_itr(q_vector);
return budget;
+ }
work_done = min_t(int, work_done, budget - 1);
@@ -1144,6 +1132,8 @@ int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget)
*/
if (likely(napi_complete_done(napi, work_done)))
idpf_vport_intr_update_itr_ena_irq(q_vector);
+ else
+ idpf_vport_intr_set_wb_on_itr(q_vector);
return work_done;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 585c3dadd9bf..d4e6f0e10487 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2,10 +2,19 @@
/* Copyright (C) 2023 Intel Corporation */
#include <net/libeth/rx.h>
+#include <net/libeth/tx.h>
#include "idpf.h"
#include "idpf_virtchnl.h"
+struct idpf_tx_stash {
+ struct hlist_node hlist;
+ struct libeth_sqe buf;
+};
+
+#define idpf_tx_buf_compl_tag(buf) (*(u32 *)&(buf)->priv)
+LIBETH_SQE_CHECK_PRIV(u32);
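The two lines above stash the splitq completion tag in the libeth_sqe private word, with LIBETH_SQE_CHECK_PRIV() asserting that a u32 actually fits there. A hedged sketch of the accessor in use (the real stamping site appears later in the Tx path; this standalone helper is illustrative):

static void example_stamp_compl_tag(struct idpf_tx_buf *tx_buf, u32 tag)
{
	/* reinterprets tx_buf->priv as the u32 completion tag */
	idpf_tx_buf_compl_tag(tx_buf) = tag;
}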
+
static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
unsigned int count);
@@ -61,41 +70,20 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/**
- * idpf_tx_buf_rel - Release a Tx buffer
- * @tx_q: the queue that owns the buffer
- * @tx_buf: the buffer to free
- */
-static void idpf_tx_buf_rel(struct idpf_tx_queue *tx_q,
- struct idpf_tx_buf *tx_buf)
-{
- if (tx_buf->skb) {
- if (dma_unmap_len(tx_buf, len))
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dev_kfree_skb_any(tx_buf->skb);
- } else if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- }
-
- tx_buf->next_to_watch = NULL;
- tx_buf->skb = NULL;
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
- dma_unmap_len_set(tx_buf, len, 0);
-}
-
-/**
* idpf_tx_buf_rel_all - Free any empty Tx buffers
* @txq: queue to be cleaned
*/
static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
{
+ struct libeth_sq_napi_stats ss = { };
struct idpf_buf_lifo *buf_stack;
- u16 i;
+ struct idpf_tx_stash *stash;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
+ struct hlist_node *tmp;
+ u32 i, tag;
/* Buffers already cleared, nothing to do */
if (!txq->tx_buf)
@@ -103,7 +91,7 @@ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
/* Free all the Tx buffer sk_buffs */
for (i = 0; i < txq->desc_count; i++)
- idpf_tx_buf_rel(txq, &txq->tx_buf[i]);
+ libeth_tx_complete(&txq->tx_buf[i], &cp);
kfree(txq->tx_buf);
txq->tx_buf = NULL;
@@ -115,6 +103,20 @@ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
if (!buf_stack->bufs)
return;
+ /*
+ * If a Tx timeout occurred, there are potentially still bufs in the
+ * hash table; free them here.
+ */
+ hash_for_each_safe(txq->stash->sched_buf_hash, tag, tmp, stash,
+ hlist) {
+ if (!stash)
+ continue;
+
+ libeth_tx_complete(&stash->buf, &cp);
+ hash_del(&stash->hlist);
+ idpf_buf_lifo_push(buf_stack, stash);
+ }
+
for (i = 0; i < buf_stack->size; i++)
kfree(buf_stack->bufs[i]);
@@ -131,6 +133,7 @@ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
{
idpf_tx_buf_rel_all(txq);
+ netdev_tx_reset_subqueue(txq->netdev, txq->idx);
if (!txq->desc_ring)
return;
@@ -203,10 +206,6 @@ static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
if (!tx_q->tx_buf)
return -ENOMEM;
- /* Initialize tx_bufs with invalid completion tags */
- for (i = 0; i < tx_q->desc_count; i++)
- tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
-
if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
return 0;
@@ -1656,37 +1655,6 @@ static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
}
/**
- * idpf_tx_splitq_clean_hdr - Clean TX buffer resources for header portion of
- * packet
- * @tx_q: tx queue to clean buffer from
- * @tx_buf: buffer to be cleaned
- * @cleaned: pointer to stats struct to track cleaned packets/bytes
- * @napi_budget: Used to determine if we are in netpoll
- */
-static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q,
- struct idpf_tx_buf *tx_buf,
- struct idpf_cleaned_stats *cleaned,
- int napi_budget)
-{
- napi_consume_skb(tx_buf->skb, napi_budget);
-
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
-
- dma_unmap_len_set(tx_buf, len, 0);
- }
-
- /* clear tx_buf data */
- tx_buf->skb = NULL;
-
- cleaned->bytes += tx_buf->bytecount;
- cleaned->packets += tx_buf->gso_segs;
-}
-
-/**
* idpf_tx_clean_stashed_bufs - clean bufs that were stored for
* out of order completions
* @txq: queue to clean
@@ -1696,33 +1664,28 @@ static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q,
*/
static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
u16 compl_tag,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
int budget)
{
struct idpf_tx_stash *stash;
struct hlist_node *tmp_buf;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = cleaned,
+ .napi = budget,
+ };
/* Buffer completion */
hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
hlist, compl_tag) {
- if (unlikely(stash->buf.compl_tag != (int)compl_tag))
+ if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) != compl_tag))
continue;
- if (stash->buf.skb) {
- idpf_tx_splitq_clean_hdr(txq, &stash->buf, cleaned,
- budget);
- } else if (dma_unmap_len(&stash->buf, len)) {
- dma_unmap_page(txq->dev,
- dma_unmap_addr(&stash->buf, dma),
- dma_unmap_len(&stash->buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(&stash->buf, len, 0);
- }
+ hash_del(&stash->hlist);
+ libeth_tx_complete(&stash->buf, &cp);
/* Push shadow buf back onto stack */
idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
-
- hash_del(&stash->hlist);
}
}
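The lookup above uses the kernel's fixed-size hash table keyed by completion tag; colliding tags share a bucket, so the tag is re-checked inside the loop, and the _safe iterator permits deletion mid-walk. A stripped-down sketch of the same scheme, with hypothetical names in place of the driver's structures:

	#include <linux/hashtable.h>

	DEFINE_HASHTABLE(example_stash_hash, 8);	/* 256 buckets */

	struct example_stash {
		struct hlist_node hlist;
		u32 tag;
	};

	static void example_complete_tag(u32 tag)
	{
		struct example_stash *stash;
		struct hlist_node *tmp;

		hash_for_each_possible_safe(example_stash_hash, stash, tmp,
					    hlist, tag) {
			if (stash->tag != tag)	/* bucket collision, skip */
				continue;

			hash_del(&stash->hlist);
			/* release/recycle the stash entry here */
		}
	}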
@@ -1737,8 +1700,7 @@ static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
{
struct idpf_tx_stash *stash;
- if (unlikely(!dma_unmap_addr(tx_buf, dma) &&
- !dma_unmap_len(tx_buf, len)))
+ if (unlikely(tx_buf->type <= LIBETH_SQE_CTX))
return 0;
stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
@@ -1751,29 +1713,27 @@ static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
/* Store buffer params in shadow buffer */
stash->buf.skb = tx_buf->skb;
- stash->buf.bytecount = tx_buf->bytecount;
- stash->buf.gso_segs = tx_buf->gso_segs;
+ stash->buf.bytes = tx_buf->bytes;
+ stash->buf.packets = tx_buf->packets;
+ stash->buf.type = tx_buf->type;
+ stash->buf.nr_frags = tx_buf->nr_frags;
dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
- stash->buf.compl_tag = tx_buf->compl_tag;
+ idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf);
/* Add buffer to buf_hash table to be freed later */
hash_add(txq->stash->sched_buf_hash, &stash->hlist,
- stash->buf.compl_tag);
-
- memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
+ idpf_tx_buf_compl_tag(&stash->buf));
- /* Reinitialize buf_id portion of tag */
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ tx_buf->type = LIBETH_SQE_EMPTY;
return 0;
}
#define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \
do { \
- (ntc)++; \
- if (unlikely(!(ntc))) { \
- ntc -= (txq)->desc_count; \
+ if (unlikely(++(ntc) == (txq)->desc_count)) { \
+ ntc = 0; \
buf = (txq)->tx_buf; \
desc = &(txq)->flex_tx[0]; \
} else { \
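The reworked macro drops the old negative-index trick (biasing ntc by -desc_count and testing for zero) in favor of a direct compare-and-wrap, which also works now that the index is unsigned. Restated as a plain helper, the wrap logic is simply:

	/* Sketch: advance a ring index, wrapping at the end of the ring. */
	static inline u32 example_bump_ntc(u32 ntc, u32 desc_count)
	{
		return unlikely(++ntc == desc_count) ? 0 : ntc;
	}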
@@ -1797,69 +1757,71 @@ do { \
* Separate packet completion events will be reported on the completion queue,
* and the buffers will be cleaned separately. The stats are not updated from
* this function when using flow-based scheduling.
+ *
+ * Furthermore, in flow scheduling mode, check to make sure there are enough
+ * reserve buffers to stash the packet. If there are not, return early, which
+ * will leave next_to_clean pointing to the packet that failed to be stashed.
+ *
+ * Return: false in the scenario above, true otherwise.
*/
-static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
+static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
int napi_budget,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
bool descs_only)
{
union idpf_tx_flex_desc *next_pending_desc = NULL;
union idpf_tx_flex_desc *tx_desc;
- s16 ntc = tx_q->next_to_clean;
+ u32 ntc = tx_q->next_to_clean;
+ struct libeth_cq_pp cp = {
+ .dev = tx_q->dev,
+ .ss = cleaned,
+ .napi = napi_budget,
+ };
struct idpf_tx_buf *tx_buf;
+ bool clean_complete = true;
tx_desc = &tx_q->flex_tx[ntc];
next_pending_desc = &tx_q->flex_tx[end];
tx_buf = &tx_q->tx_buf[ntc];
- ntc -= tx_q->desc_count;
while (tx_desc != next_pending_desc) {
- union idpf_tx_flex_desc *eop_desc;
+ u32 eop_idx;
/* If this entry in the ring was used as a context descriptor,
- * it's corresponding entry in the buffer ring will have an
- * invalid completion tag since no buffer was used. We can
- * skip this descriptor since there is no buffer to clean.
+ * its corresponding entry in the buffer ring is reserved. We
+ * can skip this descriptor since there is no buffer to clean.
*/
- if (unlikely(tx_buf->compl_tag == IDPF_SPLITQ_TX_INVAL_COMPL_TAG))
+ if (tx_buf->type <= LIBETH_SQE_CTX)
goto fetch_next_txq_desc;
- eop_desc = (union idpf_tx_flex_desc *)tx_buf->next_to_watch;
+ if (unlikely(tx_buf->type != LIBETH_SQE_SKB))
+ break;
- /* clear next_to_watch to prevent false hangs */
- tx_buf->next_to_watch = NULL;
+ eop_idx = tx_buf->rs_idx;
if (descs_only) {
- if (idpf_stash_flow_sch_buffers(tx_q, tx_buf))
+ if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) {
+ clean_complete = false;
goto tx_splitq_clean_out;
+ }
+
+ idpf_stash_flow_sch_buffers(tx_q, tx_buf);
- while (tx_desc != eop_desc) {
+ while (ntc != eop_idx) {
idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
tx_desc, tx_buf);
-
- if (dma_unmap_len(tx_buf, len)) {
- if (idpf_stash_flow_sch_buffers(tx_q,
- tx_buf))
- goto tx_splitq_clean_out;
- }
+ idpf_stash_flow_sch_buffers(tx_q, tx_buf);
}
} else {
- idpf_tx_splitq_clean_hdr(tx_q, tx_buf, cleaned,
- napi_budget);
+ libeth_tx_complete(tx_buf, &cp);
/* unmap remaining buffers */
- while (tx_desc != eop_desc) {
+ while (ntc != eop_idx) {
idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
tx_desc, tx_buf);
/* unmap any remaining paged data */
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
+ libeth_tx_complete(tx_buf, &cp);
}
}
@@ -1868,8 +1830,9 @@ fetch_next_txq_desc:
}
tx_splitq_clean_out:
- ntc += tx_q->desc_count;
tx_q->next_to_clean = ntc;
+
+ return clean_complete;
}
#define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf) \
@@ -1895,57 +1858,68 @@ do { \
* this completion tag.
*/
static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
int budget)
{
u16 idx = compl_tag & txq->compl_tag_bufid_m;
struct idpf_tx_buf *tx_buf = NULL;
- u16 ntc = txq->next_to_clean;
- u16 num_descs_cleaned = 0;
- u16 orig_idx = idx;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = cleaned,
+ .napi = budget,
+ };
+ u16 ntc, orig_idx = idx;
tx_buf = &txq->tx_buf[idx];
- while (tx_buf->compl_tag == (int)compl_tag) {
- if (tx_buf->skb) {
- idpf_tx_splitq_clean_hdr(txq, tx_buf, cleaned, budget);
- } else if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(txq->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
+ if (unlikely(tx_buf->type <= LIBETH_SQE_CTX ||
+ idpf_tx_buf_compl_tag(tx_buf) != compl_tag))
+ return false;
+
+ if (tx_buf->type == LIBETH_SQE_SKB)
+ libeth_tx_complete(tx_buf, &cp);
- memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
- num_descs_cleaned++;
+ while (idpf_tx_buf_compl_tag(tx_buf) == compl_tag) {
+ libeth_tx_complete(tx_buf, &cp);
idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
}
- /* If we didn't clean anything on the ring for this completion, there's
- * nothing more to do.
- */
- if (unlikely(!num_descs_cleaned))
- return false;
-
- /* Otherwise, if we did clean a packet on the ring directly, it's safe
- * to assume that the descriptors starting from the original
- * next_to_clean up until the previously cleaned packet can be reused.
- * Therefore, we will go back in the ring and stash any buffers still
- * in the ring into the hash table to be cleaned later.
+ /*
+ * It's possible the packet we just cleaned was an out-of-order
+ * completion, which means we can stash the buffers starting from
+ * the original next_to_clean and reuse the descriptors. We need
+ * to compare the descriptor ring next_to_clean packet's "first" buffer
+ * to the "first" buffer of the packet we just cleaned to determine if
+ * this is the case. However, next_to_clean can point to either a
+ * reserved buffer that corresponds to a context descriptor used for the
+ * next_to_clean packet (TSO packet) or the "first" buffer (single
+ * packet). The orig_idx from the packet we just cleaned will always
+ * point to the "first" buffer. If next_to_clean points to a reserved
+ * buffer, let's bump ntc once and start the comparison from there.
*/
+ ntc = txq->next_to_clean;
tx_buf = &txq->tx_buf[ntc];
- while (tx_buf != &txq->tx_buf[orig_idx]) {
- idpf_stash_flow_sch_buffers(txq, tx_buf);
+
+ if (tx_buf->type == LIBETH_SQE_CTX)
idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
- }
- /* Finally, update next_to_clean to reflect the work that was just done
- * on the ring, if any. If the packet was only cleaned from the hash
- * table, the ring will not be impacted, therefore we should not touch
- * next_to_clean. The updated idx is used here
+ /*
+ * If ntc still points to a different "first" buffer, clean the
+ * descriptor ring and stash all of the buffers for later cleaning. If
+ * we cannot stash all of the buffers, next_to_clean will point to the
+ * "first" buffer of the packet that could not be stashed and cleaning
+ * will start there next time.
+ */
+ if (unlikely(tx_buf != &txq->tx_buf[orig_idx] &&
+ !idpf_tx_splitq_clean(txq, orig_idx, budget, cleaned,
+ true)))
+ return true;
+
+ /*
+ * Otherwise, update next_to_clean to reflect the cleaning that was
+ * done above.
*/
txq->next_to_clean = idx;
@@ -1965,7 +1939,7 @@ static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
*/
static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
struct idpf_splitq_tx_compl_desc *desc,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
int budget)
{
u16 compl_tag;
@@ -1973,7 +1947,8 @@ static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
- return idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
+ idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
+ return;
}
compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
@@ -2008,7 +1983,7 @@ static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
ntc -= complq->desc_count;
do {
- struct idpf_cleaned_stats cleaned_stats = { };
+ struct libeth_sq_napi_stats cleaned_stats = { };
struct idpf_tx_queue *tx_q;
int rel_tx_qid;
u16 hw_head;
@@ -2158,29 +2133,6 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
}
/**
- * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions
- * @tx_q: the queue to be checked
- * @size: number of descriptors we want to assure is available
- *
- * Returns 0 if stop is not needed
- */
-int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size)
-{
- struct netdev_queue *nq;
-
- if (likely(IDPF_DESC_UNUSED(tx_q) >= size))
- return 0;
-
- u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.q_busy);
- u64_stats_update_end(&tx_q->stats_sync);
-
- nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
-
- return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size);
-}
-
-/**
* idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
* @tx_q: the queue to be checked
* @descs_needed: number of descriptors required for this packet
@@ -2191,7 +2143,7 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
unsigned int descs_needed)
{
if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
- goto splitq_stop;
+ goto out;
/* If there are too many outstanding completions expected on the
* completion queue, stop the TX queue to give the device some time to
@@ -2210,10 +2162,12 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
return 0;
splitq_stop:
+ netif_stop_subqueue(tx_q->netdev, tx_q->idx);
+
+out:
u64_stats_update_begin(&tx_q->stats_sync);
u64_stats_inc(&tx_q->q_stats.q_busy);
u64_stats_update_end(&tx_q->stats_sync);
- netif_stop_subqueue(tx_q->netdev, tx_q->idx);
return -EBUSY;
}
@@ -2236,7 +2190,11 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
tx_q->next_to_use = val;
- idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED);
+ if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.q_busy);
+ u64_stats_update_end(&tx_q->stats_sync);
+ }
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
@@ -2307,6 +2265,12 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *first, u16 idx)
{
+ struct libeth_sq_napi_stats ss = { };
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
+
u64_stats_update_begin(&txq->stats_sync);
u64_stats_inc(&txq->q_stats.dma_map_errs);
u64_stats_update_end(&txq->stats_sync);
@@ -2316,7 +2280,7 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *tx_buf;
tx_buf = &txq->tx_buf[idx];
- idpf_tx_buf_rel(txq, tx_buf);
+ libeth_tx_complete(tx_buf, &cp);
if (tx_buf == first)
break;
if (idx == 0)
@@ -2395,6 +2359,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
tx_buf = first;
+ first->nr_frags = 0;
params->compl_tag =
(tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
@@ -2405,7 +2370,9 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
if (dma_mapping_error(tx_q->dev, dma))
return idpf_tx_dma_map_error(tx_q, skb, first, i);
- tx_buf->compl_tag = params->compl_tag;
+ first->nr_frags++;
+ idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
+ tx_buf->type = LIBETH_SQE_FRAG;
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
@@ -2459,14 +2426,15 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
max_data);
- tx_desc++;
- i++;
-
- if (i == tx_q->desc_count) {
+ if (unlikely(++i == tx_q->desc_count)) {
+ tx_buf = tx_q->tx_buf;
tx_desc = &tx_q->flex_tx[0];
i = 0;
tx_q->compl_tag_cur_gen =
IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
+ } else {
+ tx_buf++;
+ tx_desc++;
}
/* Since this packet has a buffer that is going to span
@@ -2479,8 +2447,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
* simply pass over these holes and finish cleaning the
* rest of the packet.
*/
- memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
- tx_q->tx_buf[i].compl_tag = params->compl_tag;
+ tx_buf->type = LIBETH_SQE_EMPTY;
/* Adjust the DMA offset and the remaining size of the
* fragment. On the first iteration of this loop,
@@ -2504,13 +2471,15 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
break;
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
- tx_desc++;
- i++;
- if (i == tx_q->desc_count) {
+ if (unlikely(++i == tx_q->desc_count)) {
+ tx_buf = tx_q->tx_buf;
tx_desc = &tx_q->flex_tx[0];
i = 0;
tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
+ } else {
+ tx_buf++;
+ tx_desc++;
}
size = skb_frag_size(frag);
@@ -2518,26 +2487,24 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
DMA_TO_DEVICE);
-
- tx_buf = &tx_q->tx_buf[i];
}
/* record SW timestamp if HW timestamp is not available */
skb_tx_timestamp(skb);
+ first->type = LIBETH_SQE_SKB;
+
/* write last descriptor with RS and EOP bits */
+ first->rs_idx = i;
td_cmd |= params->eop_cmd;
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
i = idpf_tx_splitq_bump_ntu(tx_q, i);
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
-
tx_q->txq_grp->num_completions_pending++;
/* record bytecount for BQL */
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- netdev_tx_sent_queue(nq, first->bytecount);
+ netdev_tx_sent_queue(nq, first->bytes);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
}
@@ -2737,8 +2704,7 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
struct idpf_flex_tx_ctx_desc *desc;
int i = txq->next_to_use;
- memset(&txq->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
- txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ txq->tx_buf[i].type = LIBETH_SQE_CTX;
/* grab the next descriptor */
desc = &txq->flex_ctx[i];
@@ -2822,12 +2788,12 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
first->skb = skb;
if (tso) {
- first->gso_segs = tx_params.offload.tso_segs;
- first->bytecount = skb->len +
- ((first->gso_segs - 1) * tx_params.offload.tso_hdr_len);
+ first->packets = tx_params.offload.tso_segs;
+ first->bytes = skb->len +
+ ((first->packets - 1) * tx_params.offload.tso_hdr_len);
} else {
- first->gso_segs = 1;
- first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
+ first->packets = 1;
+ first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
}
if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
@@ -3749,6 +3715,7 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
/* net_dim() updates ITR out-of-band using a work item */
idpf_net_dim(q_vector);
+ q_vector->wb_on_itr = false;
intval = idpf_vport_intr_buildreg_itr(q_vector,
IDPF_NO_ITR_UPDATE_IDX, 0);
@@ -4051,8 +4018,10 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
/* If work not completed, return budget and polling will return */
- if (!clean_complete)
+ if (!clean_complete) {
+ idpf_vport_intr_set_wb_on_itr(q_vector);
return budget;
+ }
work_done = min_t(int, work_done, budget - 1);
@@ -4061,6 +4030,8 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
*/
if (likely(napi_complete_done(napi, work_done)))
idpf_vport_intr_update_itr_ena_irq(q_vector);
+ else
+ idpf_vport_intr_set_wb_on_itr(q_vector);
/* Switch to poll mode in the tear-down path after sending disable
* queues virtchnl message, as the interrupts will be disabled after
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 6215dbee5546..f0537826f840 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -127,11 +127,10 @@ do { \
*/
#define IDPF_TX_COMPLQ_PENDING(txq) \
(((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
- 0 : U64_MAX) + \
+ 0 : U32_MAX) + \
(txq)->num_completions_pending - (txq)->complq->num_completions)
#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16
-#define IDPF_SPLITQ_TX_INVAL_COMPL_TAG -1
/* Adjust the generation for the completion tag and wrap if necessary */
#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
@@ -149,47 +148,7 @@ union idpf_tx_flex_desc {
struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
};
-/**
- * struct idpf_tx_buf
- * @next_to_watch: Next descriptor to clean
- * @skb: Pointer to the skb
- * @dma: DMA address
- * @len: DMA length
- * @bytecount: Number of bytes
- * @gso_segs: Number of GSO segments
- * @compl_tag: Splitq only, unique identifier for a buffer. Used to compare
- * with completion tag returned in buffer completion event.
- * Because the completion tag is expected to be the same in all
- * data descriptors for a given packet, and a single packet can
- * span multiple buffers, we need this field to track all
- * buffers associated with this completion tag independently of
- * the buf_id. The tag consists of a N bit buf_id and M upper
- * order "generation bits". See compl_tag_bufid_m and
- * compl_tag_gen_s in struct idpf_queue. We'll use a value of -1
- * to indicate the tag is not valid.
- * @ctx_entry: Singleq only. Used to indicate the corresponding entry
- * in the descriptor ring was used for a context descriptor and
- * this buffer entry should be skipped.
- */
-struct idpf_tx_buf {
- void *next_to_watch;
- struct sk_buff *skb;
- DEFINE_DMA_UNMAP_ADDR(dma);
- DEFINE_DMA_UNMAP_LEN(len);
- unsigned int bytecount;
- unsigned short gso_segs;
-
- union {
- int compl_tag;
-
- bool ctx_entry;
- };
-};
-
-struct idpf_tx_stash {
- struct hlist_node hlist;
- struct idpf_tx_buf buf;
-};
+#define idpf_tx_buf libeth_sqe
/**
* struct idpf_buf_lifo - LIFO for managing OOO completions
@@ -390,9 +349,11 @@ struct idpf_vec_regs {
* struct idpf_intr_reg
* @dyn_ctl: Dynamic control interrupt register
* @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
+ * @dyn_ctl_intena_msk_m: Mask for dyn_ctl interrupt enable mask
* @dyn_ctl_itridx_s: Register bit offset for ITR index
* @dyn_ctl_itridx_m: Mask for ITR index
* @dyn_ctl_intrvl_s: Register bit offset for ITR interval
+ * @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
* @rx_itr: RX ITR register
* @tx_itr: TX ITR register
* @icr_ena: Interrupt cause register offset
@@ -401,9 +362,11 @@ struct idpf_vec_regs {
struct idpf_intr_reg {
void __iomem *dyn_ctl;
u32 dyn_ctl_intena_m;
+ u32 dyn_ctl_intena_msk_m;
u32 dyn_ctl_itridx_s;
u32 dyn_ctl_itridx_m;
u32 dyn_ctl_intrvl_s;
+ u32 dyn_ctl_wb_on_itr_m;
void __iomem *rx_itr;
void __iomem *tx_itr;
void __iomem *icr_ena;
@@ -424,6 +387,7 @@ struct idpf_intr_reg {
* @intr_reg: See struct idpf_intr_reg
* @napi: napi handler
* @total_events: Number of interrupts processed
+ * @wb_on_itr: whether WB on ITR is enabled
* @tx_dim: Data for TX net_dim algorithm
* @tx_itr_value: TX interrupt throttling rate
* @tx_intr_mode: Dynamic ITR or not
@@ -454,6 +418,7 @@ struct idpf_q_vector {
__cacheline_group_begin_aligned(read_write);
struct napi_struct napi;
u16 total_events;
+ bool wb_on_itr;
struct dim tx_dim;
u16 tx_itr_value;
@@ -472,7 +437,7 @@ struct idpf_q_vector {
cpumask_var_t affinity_mask;
__cacheline_group_end_aligned(cold);
};
-libeth_cacheline_set_assert(struct idpf_q_vector, 104,
+libeth_cacheline_set_assert(struct idpf_q_vector, 112,
424 + 2 * sizeof(struct dim),
8 + sizeof(cpumask_var_t));
@@ -496,11 +461,6 @@ struct idpf_tx_queue_stats {
u64_stats_t dma_map_errs;
};
-struct idpf_cleaned_stats {
- u32 packets;
- u32 bytes;
-};
-
#define IDPF_ITR_DYNAMIC 1
#define IDPF_ITR_MAX 0x1FE0
#define IDPF_ITR_20K 0x0032
@@ -688,7 +648,7 @@ struct idpf_tx_queue {
void *desc_ring;
};
- struct idpf_tx_buf *tx_buf;
+ struct libeth_sqe *tx_buf;
struct idpf_txq_group *txq_grp;
struct device *dev;
void __iomem *tail;
@@ -831,7 +791,7 @@ struct idpf_compl_queue {
u32 next_to_use;
u32 next_to_clean;
- u32 num_completions;
+ aligned_u64 num_completions;
__cacheline_group_end_aligned(read_write);
__cacheline_group_begin_aligned(cold);
@@ -963,7 +923,7 @@ struct idpf_txq_group {
struct idpf_compl_queue *complq;
- u32 num_completions_pending;
+ aligned_u64 num_completions_pending;
};
static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
@@ -1033,6 +993,25 @@ static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
}
+/**
+ * idpf_vport_intr_set_wb_on_itr - enable descriptor write-back while interrupts are masked
+ * @q_vector: pointer to queue vector struct
+ */
+static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
+{
+ struct idpf_intr_reg *reg;
+
+ if (q_vector->wb_on_itr)
+ return;
+
+ q_vector->wb_on_itr = true;
+ reg = &q_vector->intr_reg;
+
+ writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m |
+ (IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s),
+ reg->dyn_ctl);
+}
+
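This helper is invoked from both NAPI poll paths whenever the vector stays in polling mode, either because the budget was exhausted or because napi_complete_done() declined to complete. The register write sets the WB-on-ITR and interrupt-mask bits without the interrupt-enable bit, so completed descriptors keep being written back on ITR expiry while the IRQ itself stays masked. A sketch of the poll-side usage mirroring the hunks above (example_clean_all() is a hypothetical stand-in for the clean routines):

	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		struct idpf_q_vector *qv =
			container_of(napi, struct idpf_q_vector, napi);
		int work_done = 0;

		if (!example_clean_all(qv, budget, &work_done)) {
			/* Work remains: stay in polling mode, but keep
			 * descriptor write-backs flowing on ITR expiry.
			 */
			idpf_vport_intr_set_wb_on_itr(qv);
			return budget;
		}

		work_done = min_t(int, work_done, budget - 1);

		if (likely(napi_complete_done(napi, work_done)))
			idpf_vport_intr_update_itr_ena_irq(qv);
		else
			idpf_vport_intr_set_wb_on_itr(qv);

		return work_done;
	}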
int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_msg);
@@ -1064,7 +1043,6 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *first, u16 ring_idx);
unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
struct sk_buff *skb);
-int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
struct idpf_tx_queue *tx_q);
@@ -1073,4 +1051,12 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
u16 cleaned_count);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
+static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q,
+ u32 needed)
+{
+ return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+ IDPF_DESC_UNUSED(tx_q),
+ needed, needed);
+}
+
#endif /* !_IDPF_TXRX_H_ */
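Moving idpf_tx_maybe_stop_common() into the header as a thin wrapper delegates the stop/restart race to the core netif_subqueue_maybe_stop() helper, which stops the subqueue, re-checks the free-descriptor count, and restarts the queue if the cleaning side freed space in between. A hedged sketch of an xmit-path call site:

	static netdev_tx_t example_xmit(struct idpf_tx_queue *tx_q,
					u32 descs_needed)
	{
		if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
			return NETDEV_TX_BUSY;	/* subqueue was stopped */

		/* ... map buffers and ring the doorbell ... */
		return NETDEV_TX_OK;
	}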
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index 629cb5cb7c9f..aad62e270ae4 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -97,7 +97,10 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
intr->dyn_ctl = idpf_get_reg_addr(adapter,
reg_vals[vec_id].dyn_ctl_reg);
intr->dyn_ctl_intena_m = VF_INT_DYN_CTLN_INTENA_M;
+ intr->dyn_ctl_intena_msk_m = VF_INT_DYN_CTLN_INTENA_MSK_M;
intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S;
+ intr->dyn_ctl_intrvl_s = VF_INT_DYN_CTLN_INTERVAL_S;
+ intr->dyn_ctl_wb_on_itr_m = VF_INT_DYN_CTLN_WB_ON_ITR_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
IDPF_VF_ITR_IDX_SPACING);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 70986e12da28..15c00a01f1c0 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -666,7 +666,7 @@ idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
if (ctlq_msg->data_len) {
payload = ctlq_msg->ctx.indirect.payload->va;
- payload_size = ctlq_msg->ctx.indirect.payload->size;
+ payload_size = ctlq_msg->data_len;
}
xn->reply_sz = payload_size;
@@ -1295,10 +1295,6 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
err = reply_sz;
goto free_vport_params;
}
- if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) {
- err = -EIO;
- goto free_vport_params;
- }
return 0;
@@ -2602,9 +2598,6 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
if (reply_sz < 0)
return reply_sz;
- if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN)
- return -EIO;
-
ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
if (ptypes_recvd > max_ptype)
return -EINVAL;
@@ -3088,9 +3081,9 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
return;
- idpf_vc_xn_shutdown(adapter->vcxn_mngr);
idpf_deinit_task(adapter);
idpf_intr_rel(adapter);
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->mbx_task);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 06b9970dffad..ca6ccbc13954 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2387,15 +2387,11 @@ static int igb_get_ts_info(struct net_device *dev,
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
switch (adapter->hw.mac.type) {
case e1000_82575:
info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
case e1000_82576:
case e1000_82580:
@@ -2405,8 +2401,6 @@ static int igb_get_ts_info(struct net_device *dev,
case e1000_i211:
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 11be39f435f3..f1d088168723 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -33,6 +33,7 @@
#include <linux/bpf_trace.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
+#include <linux/lockdep.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
@@ -2914,8 +2915,11 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+/* This function assumes __netif_tx_lock is held by the caller. */
static void igb_xdp_ring_update_tail(struct igb_ring *ring)
{
+ lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
*/
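With the lockdep assertion in place, every tail bump must now happen under the queue's xmit lock; the xmit-path hunk below keeps the lock held across the flush, and the RX-path hunk further down takes it explicitly. The pattern, sketched:

	static void example_flush_xdp_tx(struct igb_ring *tx_ring)
	{
		struct netdev_queue *nq = txring_txq(tx_ring);

		__netif_tx_lock(nq, smp_processor_id());
		igb_xdp_ring_update_tail(tx_ring);	/* assertion holds here */
		__netif_tx_unlock(nq);
	}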
@@ -3000,11 +3004,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
nxmit++;
}
- __netif_tx_unlock(nq);
-
if (unlikely(flags & XDP_XMIT_FLUSH))
igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
+
return nxmit;
}
@@ -4808,6 +4812,7 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
#if (PAGE_SIZE < 8192)
if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB ||
+ IGB_2K_TOO_SMALL_WITH_PADDING ||
rd32(E1000_RCTL) & E1000_RCTL_SBP)
set_ring_uses_large_buffer(rx_ring);
#endif
@@ -6959,10 +6964,20 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
static void igb_tsync_interrupt(struct igb_adapter *adapter)
{
+ const u32 mask = (TSINTR_SYS_WRAP | E1000_TSICR_TXTS |
+ TSINTR_TT0 | TSINTR_TT1 |
+ TSINTR_AUTT0 | TSINTR_AUTT1);
struct e1000_hw *hw = &adapter->hw;
u32 tsicr = rd32(E1000_TSICR);
struct ptp_clock_event event;
+ if (hw->mac.type == e1000_82580) {
+ /* 82580 has a hardware bug that requires an explicit
+ * write to clear the TimeSync interrupt cause.
+ */
+ wr32(E1000_TSICR, tsicr & mask);
+ }
+
if (tsicr & TSINTR_SYS_WRAP) {
event.type = PTP_CLOCK_PPS;
if (adapter->ptp_caps.pps)
@@ -8853,12 +8868,14 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
+ unsigned int total_bytes = 0, total_packets = 0;
struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *rx_ring = q_vector->rx.ring;
- struct sk_buff *skb = rx_ring->skb;
- unsigned int total_bytes = 0, total_packets = 0;
u16 cleaned_count = igb_desc_unused(rx_ring);
+ struct sk_buff *skb = rx_ring->skb;
+ int cpu = smp_processor_id();
unsigned int xdp_xmit = 0;
+ struct netdev_queue *nq;
struct xdp_buff xdp;
u32 frame_sz = 0;
int rx_buf_pgcnt;
@@ -8986,7 +9003,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (xdp_xmit & IGB_XDP_TX) {
struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
}
u64_stats_update_begin(&rx_ring->rx_syncp);
@@ -9631,6 +9651,10 @@ static void igb_io_resume(struct pci_dev *pdev)
struct igb_adapter *adapter = netdev_priv(netdev);
if (netif_running(netdev)) {
+ if (!test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_dbg(&pdev->dev, "Resuming from non-fatal error, do nothing.\n");
+ return;
+ }
if (igb_up(adapter)) {
dev_err(&pdev->dev, "igb_up failed after reset\n");
return;
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 7b83678ba83a..6ad35a00a287 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -282,7 +282,6 @@ enum igbvf_state_t {
extern char igbvf_driver_name[];
-void igbvf_check_options(struct igbvf_adapter *);
void igbvf_set_ethtool_ops(struct net_device *);
int igbvf_up(struct igbvf_adapter *);
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h
index e5b31818d565..7637d21445bf 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.h
+++ b/drivers/net/ethernet/intel/igbvf/mbx.h
@@ -49,7 +49,6 @@
#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
-void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
s32 e1000_init_mbx_params_vf(struct e1000_hw *);
#endif /* _E1000_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index c38b4d0f00ce..eac0f966e0e4 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -259,6 +259,10 @@ struct igc_adapter {
*/
spinlock_t qbv_tx_lock;
+ bool strict_priority_enable;
+ u8 num_tc;
+ u16 queue_per_tc[IGC_MAX_TX_QUEUES];
+
/* OS defined structs */
struct pci_dev *pdev;
/* lock for statistics */
@@ -382,9 +386,11 @@ extern char igc_driver_name[];
#define IGC_FLAG_RX_LEGACY BIT(16)
#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
#define IGC_FLAG_TSN_QAV_ENABLED BIT(18)
+#define IGC_FLAG_TSN_LEGACY_ENABLED BIT(19)
-#define IGC_FLAG_TSN_ANY_ENABLED \
- (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED)
+#define IGC_FLAG_TSN_ANY_ENABLED \
+ (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED | \
+ IGC_FLAG_TSN_LEGACY_ENABLED)
#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
@@ -681,6 +687,7 @@ enum igc_ring_flags_t {
IGC_RING_FLAG_TX_DETECT_HANG,
IGC_RING_FLAG_AF_XDP_ZC,
IGC_RING_FLAG_TX_HWTSTAMP,
+ IGC_RING_FLAG_RX_ALLOC_FAILED,
};
#define ring_uses_large_buffer(ring) \
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 5f92b3c7c3d4..8e449904aa7d 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -4,6 +4,8 @@
#ifndef _IGC_DEFINES_H_
#define _IGC_DEFINES_H_
+#include <linux/bitfield.h>
+
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define REQ_TX_DESCRIPTOR_MULTIPLE 8
#define REQ_RX_DESCRIPTOR_MULTIPLE 8
@@ -176,7 +178,6 @@
/* PHY GPY 211 registers */
#define STANDARD_AN_REG_MASK 0x0007 /* MMD */
-#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */
#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */
#define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */
@@ -404,6 +405,12 @@
#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
+/* Retry Buffer Control */
+#define IGC_RETX_CTL 0x041C
+#define IGC_RETX_CTL_WATERMARK_MASK 0xF
+#define IGC_RETX_CTL_QBVFULLTH_SHIFT 8 /* QBV Retry Buffer Full Threshold */
+#define IGC_RETX_CTL_QBVFULLEN 0x1000 /* Enable QBV Retry Buffer Full Threshold */
+
/* Transmit Scheduling Latency */
/* Latency between transmission scheduling (LaunchTime) and the time
* the packet is transmitted to the network in nanosecond.
@@ -547,6 +554,15 @@
#define IGC_MAX_SR_QUEUES 2
+#define IGC_TXARB_TXQ_PRIO_0_MASK GENMASK(1, 0)
+#define IGC_TXARB_TXQ_PRIO_1_MASK GENMASK(3, 2)
+#define IGC_TXARB_TXQ_PRIO_2_MASK GENMASK(5, 4)
+#define IGC_TXARB_TXQ_PRIO_3_MASK GENMASK(7, 6)
+#define IGC_TXARB_TXQ_PRIO_0(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_0_MASK, (x))
+#define IGC_TXARB_TXQ_PRIO_1(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_1_MASK, (x))
+#define IGC_TXARB_TXQ_PRIO_2(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_2_MASK, (x))
+#define IGC_TXARB_TXQ_PRIO_3(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_3_MASK, (x))
+
/* Receive Checksum Control */
#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
@@ -635,6 +651,16 @@
#define IGC_MDIC_READY 0x10000000
#define IGC_MDIC_ERROR 0x40000000
+/* EEE Link Ability */
+#define IGC_EEE_2500BT_MASK BIT(0)
+#define IGC_EEE_1000BT_MASK BIT(2)
+#define IGC_EEE_100BT_MASK BIT(1)
+
+/* EEE Link-Partner Ability */
+#define IGC_LP_EEE_2500BT_MASK BIT(0)
+#define IGC_LP_EEE_1000BT_MASK BIT(2)
+#define IGC_LP_EEE_100BT_MASK BIT(1)
+
#define IGC_N0_QUEUE -1
#define IGC_MAX_MAC_HDR_LEN 127
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 3d3ef4e1547c..5b0c6f433767 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1540,6 +1540,10 @@ static int igc_ethtool_set_channels(struct net_device *netdev,
if (ch->other_count != NON_Q_VECTORS)
return -EINVAL;
+ /* Do not allow channel reconfiguration when mqprio is enabled */
+ if (adapter->strict_priority_enable)
+ return -EINVAL;
+
/* Verify the number of channels doesn't exceed hw limits */
max_combined = igc_get_max_rss_queues(adapter);
if (count > max_combined)
@@ -1565,15 +1569,11 @@ static int igc_ethtool_get_ts_info(struct net_device *dev,
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
switch (adapter->hw.mac.type) {
case igc_i225:
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -1627,8 +1627,11 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
- u32 eeer;
+ struct igc_phy_info *phy = &hw->phy;
+ u16 eee_advert, eee_lp_advert;
+ u32 eeer, ret_val;
+ /* EEE supported */
linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
edata->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
@@ -1636,6 +1639,74 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
edata->supported);
+ /* EEE Advertisement 1 - reg 7.60 */
+ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ IGC_ANEG_EEE_AB1,
+ &eee_advert);
+ if (ret_val) {
+ netdev_err(adapter->netdev,
+ "Failed to read IEEE 7.60 register\n");
+ return -EINVAL;
+ }
+
+ if (eee_advert & IGC_EEE_1000BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->advertised);
+
+ if (eee_advert & IGC_EEE_100BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->advertised);
+
+ /* EEE Advertisement 2 - reg 7.62 */
+ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ IGC_ANEG_EEE_AB2,
+ &eee_advert);
+ if (ret_val) {
+ netdev_err(adapter->netdev,
+ "Failed to read IEEE 7.62 register\n");
+ return -EINVAL;
+ }
+
+ if (eee_advert & IGC_EEE_2500BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ edata->advertised);
+
+ /* EEE Link-Partner Ability 1 - reg 7.61 */
+ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ IGC_ANEG_EEE_LP_AB1,
+ &eee_lp_advert);
+ if (ret_val) {
+ netdev_err(adapter->netdev,
+ "Failed to read IEEE 7.61 register\n");
+ return -EINVAL;
+ }
+
+ if (eee_lp_advert & IGC_LP_EEE_1000BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->lp_advertised);
+
+ if (eee_lp_advert & IGC_LP_EEE_100BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->lp_advertised);
+
+ /* EEE Link-Partner Ability 2 - reg 7.63 */
+ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ IGC_ANEG_EEE_LP_AB2,
+ &eee_lp_advert);
+ if (ret_val) {
+ netdev_err(adapter->netdev,
+ "Failed to read IEEE 7.63 register\n");
+ return -EINVAL;
+ }
+
+ if (eee_lp_advert & IGC_LP_EEE_2500BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ edata->lp_advertised);
+
eeer = rd32(IGC_EEER);
/* EEE status on negotiated link */
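All four reads above use the same Clause-45-style addressing: the MMD device number (7, the Auto-Negotiation device) sits in the upper bits of the register argument and the in-device offset in the lower bits, so EEE ability register 7.60 becomes device 7, offset 0x3c. Sketched as a hypothetical helper:

	static s32 example_read_eee_ab1(struct igc_hw *hw, u16 *val)
	{
		u32 addr = (STANDARD_AN_REG_MASK << MMD_DEVADDR_SHIFT) |
			   IGC_ANEG_EEE_AB1;	/* MMD 7, offset 0x3c */

		return hw->phy.ops.read_reg(hw, addr, val);
	}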
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 8daf938afc36..6e70bca15db1 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2191,6 +2191,7 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -2207,6 +2208,7 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
__free_page(page);
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -2658,6 +2660,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
if (!skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
+ set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
break;
}
@@ -2738,6 +2741,7 @@ static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
skb = igc_construct_skb_zc(ring, xdp);
if (!skb) {
ring->rx_stats.alloc_failed++;
+ set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags);
return;
}
@@ -5807,11 +5811,29 @@ no_wait:
if (adapter->flags & IGC_FLAG_HAS_MSIX) {
u32 eics = 0;
- for (i = 0; i < adapter->num_q_vectors; i++)
- eics |= adapter->q_vector[i]->eims_value;
- wr32(IGC_EICS, eics);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igc_q_vector *q_vector = adapter->q_vector[i];
+ struct igc_ring *rx_ring;
+
+ if (!q_vector->rx.ring)
+ continue;
+
+ rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index];
+
+ if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ eics |= q_vector->eims_value;
+ clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ }
+ }
+ if (eics)
+ wr32(IGC_EICS, eics);
} else {
- wr32(IGC_ICS, IGC_ICS_RXDMT0);
+ struct igc_ring *rx_ring = adapter->rx_ring[0];
+
+ if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ wr32(IGC_ICS, IGC_ICS_RXDMT0);
+ }
}
igc_ptp_tx_hang(adapter);
@@ -6315,12 +6337,16 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (!validate_schedule(adapter, qopt))
return -EINVAL;
+ igc_ptp_read(adapter, &now);
+
+ if (igc_tsn_is_taprio_activated_by_user(adapter) &&
+ is_base_time_past(qopt->base_time, &now))
+ adapter->qbv_config_change_errors++;
+
adapter->cycle_time = qopt->cycle_time;
adapter->base_time = qopt->base_time;
adapter->taprio_offload_enable = true;
- igc_ptp_read(adapter, &now);
-
for (n = 0; n < qopt->num_entries; n++) {
struct tc_taprio_sched_entry *e = &qopt->entries[n];
@@ -6511,6 +6537,13 @@ static int igc_tc_query_caps(struct igc_adapter *adapter,
struct igc_hw *hw = &adapter->hw;
switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
@@ -6528,6 +6561,65 @@ static int igc_tc_query_caps(struct igc_adapter *adapter,
}
}
+static void igc_save_mqprio_params(struct igc_adapter *adapter, u8 num_tc,
+ u16 *offset)
+{
+ int i;
+
+ adapter->strict_priority_enable = true;
+ adapter->num_tc = num_tc;
+
+ for (i = 0; i < num_tc; i++)
+ adapter->queue_per_tc[i] = offset[i];
+}
+
+static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int i;
+
+ if (hw->mac.type != igc_i225)
+ return -EOPNOTSUPP;
+
+ if (!mqprio->qopt.num_tc) {
+ adapter->strict_priority_enable = false;
+ goto apply;
+ }
+
+ /* There must be as many TCs as Tx queues. */
+ if (mqprio->qopt.num_tc != adapter->num_tx_queues) {
+ NL_SET_ERR_MSG_FMT_MOD(mqprio->extack,
+ "Only %d traffic classes supported",
+ adapter->num_tx_queues);
+ return -EOPNOTSUPP;
+ }
+
+ /* Only one queue per TC is supported. */
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ if (mqprio->qopt.count[i] != 1) {
+ NL_SET_ERR_MSG_MOD(mqprio->extack,
+ "Only one queue per TC supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ /* Preemption is not supported yet. */
+ if (mqprio->preemptible_tcs) {
+ NL_SET_ERR_MSG_MOD(mqprio->extack,
+ "Preemption is not supported yet");
+ return -EOPNOTSUPP;
+ }
+
+ igc_save_mqprio_params(adapter, mqprio->qopt.num_tc,
+ mqprio->qopt.offset);
+
+ mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+apply:
+ return igc_tsn_offload_apply(adapter);
+}
+
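The handler therefore only accepts the one mqprio shape this hardware can honor: exactly one queue per TC, as many TCs as Tx queues, and no frame preemption. On a four-queue i225/i226, a hypothetical invocation (not part of this patch) could look like:

	tc qdisc replace dev eth0 parent root handle 100 mqprio \
		num_tc 4 map 0 1 2 3 queues 1@0 1@1 1@2 1@3 hw 1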
static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -6547,6 +6639,9 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
case TC_SETUP_QDISC_CBS:
return igc_tsn_enable_cbs(adapter, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return igc_tsn_enable_mqprio(adapter, type_data);
+
default:
return -EOPNOTSUPP;
}
@@ -7409,6 +7504,7 @@ static void igc_io_resume(struct pci_dev *pdev)
rtnl_lock();
if (netif_running(netdev)) {
if (igc_open(netdev)) {
+ rtnl_unlock();
netdev_err(netdev, "igc_open failed after reset\n");
return;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 861f37076861..2801e5f24df9 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -240,7 +240,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
/* Read the MULTI GBT AN Control Register - reg 7.32 */
ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
- ANEG_MULTIGBT_AN_CTRL,
+ IGC_ANEG_MULTIGBT_AN_CTRL,
&aneg_multigbt_an_ctrl);
if (ret_val)
@@ -380,7 +380,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
ret_val = phy->ops.write_reg(hw,
(STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
- ANEG_MULTIGBT_AN_CTRL,
+ IGC_ANEG_MULTIGBT_AN_CTRL,
aneg_multigbt_an_ctrl);
return ret_val;
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index e5b893fc5b66..12ddc5793651 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -238,6 +238,8 @@
#define IGC_TQAVCC(_n) (0x3004 + ((_n) * 0x40))
#define IGC_TQAVHC(_n) (0x300C + ((_n) * 0x40))
+#define IGC_TXARB 0x3354 /* Tx Arbitration Control TxARB - RW */
+
/* System Time Registers */
#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */
#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */
@@ -308,6 +310,16 @@
#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */
#define IGC_EEE_SU 0x0E34 /* EEE Setup */
+/* MULTI GBT AN Control Register - reg. 7.32 */
+#define IGC_ANEG_MULTIGBT_AN_CTRL 0x0020
+
+/* EEE ANeg Advertisement Register - reg 7.60 and reg 7.62 */
+#define IGC_ANEG_EEE_AB1 0x003c
+#define IGC_ANEG_EEE_AB2 0x003e
+/* EEE ANeg Link-Partner Advertisement Register - reg 7.61 and reg 7.63 */
+#define IGC_ANEG_EEE_LP_AB1 0x003d
+#define IGC_ANEG_EEE_LP_AB2 0x003f
+
/* LTR registers */
#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */
#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index 22cefb1eeedf..1e44374ca1ff 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -46,15 +46,25 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
if (is_cbs_enabled(adapter))
new_flags |= IGC_FLAG_TSN_QAV_ENABLED;
+ if (adapter->strict_priority_enable)
+ new_flags |= IGC_FLAG_TSN_LEGACY_ENABLED;
+
return new_flags;
}
+static bool igc_tsn_is_tx_mode_in_tsn(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ return !!(rd32(IGC_TQAVCTRL) & IGC_TQAVCTRL_TRANSMIT_MODE_TSN);
+}
+
void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u16 txoffset;
- if (!is_any_launchtime(adapter))
+ if (!igc_tsn_is_tx_mode_in_tsn(adapter))
return;
switch (adapter->link_speed) {
@@ -78,11 +88,49 @@ void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
wr32(IGC_GTXOFFSET, txoffset);
}
+static void igc_tsn_restore_retx_default(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 retxctl;
+
+ retxctl = rd32(IGC_RETX_CTL) & IGC_RETX_CTL_WATERMARK_MASK;
+ wr32(IGC_RETX_CTL, retxctl);
+}
+
+bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ return (rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
+ adapter->taprio_offload_enable;
+}
+
+static void igc_tsn_tx_arb(struct igc_adapter *adapter, u16 *queue_per_tc)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 txarb;
+
+ txarb = rd32(IGC_TXARB);
+
+ txarb &= ~(IGC_TXARB_TXQ_PRIO_0_MASK |
+ IGC_TXARB_TXQ_PRIO_1_MASK |
+ IGC_TXARB_TXQ_PRIO_2_MASK |
+ IGC_TXARB_TXQ_PRIO_3_MASK);
+
+ txarb |= IGC_TXARB_TXQ_PRIO_0(queue_per_tc[3]);
+ txarb |= IGC_TXARB_TXQ_PRIO_1(queue_per_tc[2]);
+ txarb |= IGC_TXARB_TXQ_PRIO_2(queue_per_tc[1]);
+ txarb |= IGC_TXARB_TXQ_PRIO_3(queue_per_tc[0]);
+
+ wr32(IGC_TXARB, txarb);
+}
+
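Each two-bit TXARB field selects the queue serving one arbitration priority slot, with FIELD_PREP() doing the shifting. For a concrete feel of the packing:

	/* Worked example (sketch): restoring the default map
	 * queue_per_tc = {3, 2, 1, 0} yields
	 *
	 *	IGC_TXARB_TXQ_PRIO_0(0) = 0x00
	 *	IGC_TXARB_TXQ_PRIO_1(1) = 0x04
	 *	IGC_TXARB_TXQ_PRIO_2(2) = 0x20
	 *	IGC_TXARB_TXQ_PRIO_3(3) = 0xc0
	 *
	 * so the low byte of TXARB reads back as 0xe4.
	 */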
/* Returns the TSN specific registers to their default values after
* the adapter is reset.
*/
static int igc_tsn_disable_offload(struct igc_adapter *adapter)
{
+ u16 queue_per_tc[4] = { 3, 2, 1, 0 };
struct igc_hw *hw = &adapter->hw;
u32 tqavctrl;
int i;
@@ -91,6 +139,9 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
+ if (igc_is_device_id_i226(hw))
+ igc_tsn_restore_retx_default(adapter);
+
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS);
@@ -106,11 +157,39 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
wr32(IGC_QBVCYCLET_S, 0);
wr32(IGC_QBVCYCLET, NSEC_PER_SEC);
+ /* Reset mqprio TC configuration. */
+ netdev_reset_tc(adapter->netdev);
+
+ /* Restore the default Tx arbitration: Priority 0 has the highest
+ * priority and is assigned to queue 0, and so on.
+ */
+ igc_tsn_tx_arb(adapter, queue_per_tc);
+
adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
+ adapter->flags &= ~IGC_FLAG_TSN_LEGACY_ENABLED;
return 0;
}
+/* To partially fix the i226 HW errata, reduce MAC internal buffering from 192
+ * bytes to 88 bytes by setting the RETX_CTL register per the recommendation in:
+ * a) Ethernet Controller I225/I226 Specification Update Rev 2.1
+ * Item 9: TSN: Packet Transmission Might Cross the Qbv Window
+ * b) I225/6 SW User Manual Rev 1.2.4: Section 8.11.5 Retry Buffer Control
+ */
+static void igc_tsn_set_retx_qbvfullthreshold(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 retxctl, watermark;
+
+ retxctl = rd32(IGC_RETX_CTL);
+ watermark = retxctl & IGC_RETX_CTL_WATERMARK_MASK;
+ /* Set QBVFULLTH value using watermark and set QBVFULLEN */
+ retxctl |= (watermark << IGC_RETX_CTL_QBVFULLTH_SHIFT) |
+ IGC_RETX_CTL_QBVFULLEN;
+ wr32(IGC_RETX_CTL, retxctl);
+}
+
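The workaround copies the hardware's current watermark into the QBV-full-threshold field and sets the enable bit, shrinking the effective retry buffering. Numerically, assuming a (hypothetical) watermark of 0x5:

	u32 retxctl = 0x5;	/* watermark lives in bits 3:0 */

	retxctl |= (0x5 << IGC_RETX_CTL_QBVFULLTH_SHIFT) |	/* 0x0500 */
		   IGC_RETX_CTL_QBVFULLEN;			/* 0x1000 */
	/* retxctl == 0x1505 */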
static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
@@ -123,6 +202,43 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
+ if (igc_is_device_id_i226(hw))
+ igc_tsn_set_retx_qbvfullthreshold(adapter);
+
+ if (adapter->strict_priority_enable) {
+ int err;
+
+ err = netdev_set_num_tc(adapter->netdev, adapter->num_tc);
+ if (err)
+ return err;
+
+ for (i = 0; i < adapter->num_tc; i++) {
+ err = netdev_set_tc_queue(adapter->netdev, i, 1,
+ adapter->queue_per_tc[i]);
+ if (err)
+ return err;
+ }
+
+ /* In case the card is configured with fewer than four queues. */
+ for (; i < IGC_MAX_TX_QUEUES; i++)
+ adapter->queue_per_tc[i] = i;
+
+ /* Configure queue priorities according to the user provided
+ * mapping.
+ */
+ igc_tsn_tx_arb(adapter, adapter->queue_per_tc);
+
+ /* Enable legacy TSN mode which will do strict priority without
+ * any other TSN features.
+ */
+ tqavctrl = rd32(IGC_TQAVCTRL);
+ tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN;
+ tqavctrl &= ~IGC_TQAVCTRL_ENHANCED_QAV;
+ wr32(IGC_TQAVCTRL, tqavctrl);
+
+ return 0;
+ }
+
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
u32 txqctl = 0;
@@ -262,14 +378,6 @@ skip_cbs:
s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
base_time = ktime_add_ns(base_time, (n + 1) * cycle);
-
- /* Increase the counter if scheduling into the past while
- * Gate Control List (GCL) is running.
- */
- if ((rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
- (adapter->tc_setup_type == TC_SETUP_QDISC_TAPRIO) &&
- (adapter->qbv_count > 1))
- adapter->qbv_config_change_errors++;
} else {
if (igc_is_device_id_i226(hw)) {
ktime_t adjust_time, expires_time;
@@ -331,15 +439,22 @@ int igc_tsn_reset(struct igc_adapter *adapter)
return err;
}
-int igc_tsn_offload_apply(struct igc_adapter *adapter)
+static bool igc_tsn_will_tx_mode_change(struct igc_adapter *adapter)
{
- struct igc_hw *hw = &adapter->hw;
+ bool any_tsn_enabled = !!(igc_tsn_new_flags(adapter) &
+ IGC_FLAG_TSN_ANY_ENABLED);
+
+ return (any_tsn_enabled && !igc_tsn_is_tx_mode_in_tsn(adapter)) ||
+ (!any_tsn_enabled && igc_tsn_is_tx_mode_in_tsn(adapter));
+}
- /* Per I225/6 HW Design Section 7.5.2.1, transmit mode
- * cannot be changed dynamically. Require reset the adapter.
+int igc_tsn_offload_apply(struct igc_adapter *adapter)
+{
+ /* Per the I225/6 HW Design Section 7.5.2.1 guideline, if the Tx mode
+ * changes from legacy->tsn or tsn->legacy, the adapter must be reset.
*/
if (netif_running(adapter->netdev) &&
- (igc_is_device_id_i225(hw) || !adapter->qbv_count)) {
+ igc_tsn_will_tx_mode_change(adapter)) {
schedule_work(&adapter->reset_task);
return 0;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
index b53e6af560b7..98ec845a86bf 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.h
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
@@ -7,5 +7,6 @@
int igc_tsn_offload_apply(struct igc_adapter *adapter);
int igc_tsn_reset(struct igc_adapter *adapter);
void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter);
+bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter);
#endif /* _IGC_BASE_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index e85f7d2e8810..f2709b10c2e5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -317,7 +317,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
#ifdef IXGBE_FCOE
- if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+ if (adapter->netdev->fcoe_mtu)
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 4cac76254966..9482e0cca8b7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3196,16 +3196,12 @@ static int ixgbe_get_ts_info(struct net_device *dev,
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types =
BIT(HWTSTAMP_TX_OFF) |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 18d63c8c2ff4..955dced844a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -858,7 +858,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
/* enable FCoE and notify stack */
adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
- netdev->features |= NETIF_F_FCOE_MTU;
+ netdev->fcoe_mtu = true;
netdev_features_change(netdev);
/* release existing queues and reallocate them */
@@ -898,7 +898,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
/* disable FCoE and notify stack */
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
- netdev->features &= ~NETIF_F_FCOE_MTU;
+ netdev->fcoe_mtu = false;
netdev_features_change(netdev);
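
The same substitution repeats across the ixgbe files below: the NETIF_F_FCOE_MTU feature bit becomes a plain `fcoe_mtu` boolean on the netdev. A self-contained sketch of the max-frame computation this flag feeds; the toy struct is invented for illustration, and 3072 mirrors IXGBE_FCOE_JUMBO_FRAME_SIZE:

    #include <stdbool.h>
    #include <stdio.h>

    #define ETH_HLEN              14
    #define ETH_FCS_LEN            4
    #define FCOE_JUMBO_FRAME_SIZE 3072 /* stands in for IXGBE_FCOE_JUMBO_FRAME_SIZE */

    struct toy_netdev {
        int mtu;
        bool fcoe_mtu; /* was the NETIF_F_FCOE_MTU feature bit */
    };

    static int max_frame(const struct toy_netdev *dev)
    {
        int frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;

        /* FCoE traffic requires jumbo frames regardless of the MTU. */
        if (dev->fcoe_mtu && frame < FCOE_JUMBO_FRAME_SIZE)
            frame = FCOE_JUMBO_FRAME_SIZE;
        return frame;
    }

    int main(void)
    {
        struct toy_netdev dev = { .mtu = 1500, .fcoe_mtu = true };

        printf("max_frame = %d\n", max_frame(&dev)); /* prints 3072 */
        return 0;
    }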
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 0ee943db3dc9..16fa621ce0ff 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -981,7 +981,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
#ifdef IXGBE_FCOE
- if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
+ if (adapter->netdev->fcoe_mtu) {
struct ixgbe_ring_feature *f;
f = &adapter->ring_feature[RING_F_FCOE];
if ((rxr_idx >= f->offset) &&
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 8057cef61f39..8b8404d8c946 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5079,7 +5079,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
netif_set_tso_max_size(adapter->netdev, 32768);
#ifdef IXGBE_FCOE
- if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+ if (adapter->netdev->fcoe_mtu)
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif
@@ -5136,8 +5136,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
#ifdef IXGBE_FCOE
/* FCoE traffic class uses FCOE jumbo frames */
- if ((dev->features & NETIF_F_FCOE_MTU) &&
- (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+ if (dev->fcoe_mtu && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE &&
(pb == ixgbe_fcoe_get_tc(adapter)))
tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif
@@ -5197,8 +5196,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
#ifdef IXGBE_FCOE
/* FCoE traffic class uses FCOE jumbo frames */
- if ((dev->features & NETIF_F_FCOE_MTU) &&
- (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+ if (dev->fcoe_mtu && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE &&
(pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif
@@ -11096,8 +11094,7 @@ skip_sriov:
NETIF_F_FCOE_CRC;
netdev->vlan_features |= NETIF_F_FSO |
- NETIF_F_FCOE_CRC |
- NETIF_F_FCOE_MTU;
+ NETIF_F_FCOE_CRC;
}
#endif /* IXGBE_FCOE */
if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index fcfd0a075eee..e71715f5da22 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -495,7 +495,7 @@ static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf
int err = 0;
#ifdef CONFIG_FCOE
- if (dev->features & NETIF_F_FCOE_MTU)
+ if (dev->fcoe_mtu)
pf_max_frame = max_t(int, pf_max_frame,
IXGBE_FCOE_JUMBO_FRAME_SIZE);
@@ -857,7 +857,7 @@ static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf)
int pf_max_frame = dev->mtu + ETH_HLEN;
#if IS_ENABLED(CONFIG_FCOE)
- if (dev->features & NETIF_F_FCOE_MTU)
+ if (dev->fcoe_mtu)
pf_max_frame = max_t(int, pf_max_frame,
IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif /* CONFIG_FCOE */
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b06e24562973..d8be0e4dcb07 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -946,15 +946,13 @@ jme_udpsum(struct sk_buff *skb)
if (skb->protocol != htons(ETH_P_IP))
return csum;
skb_set_network_header(skb, ETH_HLEN);
- if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
- (skb->len < (ETH_HLEN +
- (ip_hdr(skb)->ihl << 2) +
- sizeof(struct udphdr)))) {
+
+ if (ip_hdr(skb)->protocol != IPPROTO_UDP ||
+ skb->len < (ETH_HLEN + ip_hdrlen(skb) + sizeof(struct udphdr))) {
skb_reset_network_header(skb);
return csum;
}
- skb_set_transport_header(skb,
- ETH_HLEN + (ip_hdr(skb)->ihl << 2));
+ skb_set_transport_header(skb, ETH_HLEN + ip_hdrlen(skb));
csum = udp_hdr(skb)->check;
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
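
The jme hunk above replaces the open-coded `ip_hdr(skb)->ihl << 2` with the equivalent ip_hdrlen() helper. The underlying arithmetic is easy to check standalone (the header byte below is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_HLEN 14

    /* IHL is the low nibble of the first IPv4 header byte, counted in
     * 32-bit words, so the header length in bytes is ihl << 2 -- exactly
     * what the kernel's ip_hdrlen() helper returns.
     */
    static unsigned int ip_hdrlen_from_byte(uint8_t version_ihl)
    {
        return (version_ihl & 0x0f) << 2;
    }

    int main(void)
    {
        uint8_t version_ihl = 0x45; /* IPv4, IHL = 5 -> 20-byte header */
        unsigned int off = ETH_HLEN + ip_hdrlen_from_byte(version_ihl);

        printf("transport header starts at byte %u\n", off); /* 34 */
        return 0;
    }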
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 9e6984815386..7179271f63b6 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -95,7 +95,6 @@ struct ltq_etop_priv {
struct mii_bus *mii_bus;
struct ltq_etop_chan ch[MAX_DMA_CHAN];
- int tx_free[MAX_DMA_CHAN >> 1];
int tx_burst_len;
int rx_burst_len;
@@ -482,7 +481,9 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
u32 byte_offset;
- len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+ if (skb_put_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ len = skb->len;
if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
netdev_err(dev, "tx ring full\n");
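
skb_put_padto() pads the frame with real zero bytes up to ETH_ZLEN and frees the skb on failure, whereas the old code only inflated the reported length, letting stale buffer contents go out on the wire. A toy userspace model of the same contract (names invented):

    #include <stdio.h>
    #include <string.h>

    #define ETH_ZLEN 60

    /* Toy model of skb_put_padto(): extend the frame with zero bytes up
     * to the minimum Ethernet frame size instead of merely claiming a
     * bigger length, which would transmit stale buffer bytes.
     */
    static size_t pad_to_min(unsigned char *buf, size_t len, size_t cap)
    {
        if (len >= ETH_ZLEN)
            return len;
        if (cap < ETH_ZLEN)
            return 0; /* caller must drop the frame, as the driver does */
        memset(buf + len, 0, ETH_ZLEN - len);
        return ETH_ZLEN;
    }

    int main(void)
    {
        unsigned char frame[128] = "short";
        size_t len = pad_to_min(frame, 5, sizeof(frame));

        printf("padded length = %zu\n", len); /* 60 */
        return 0;
    }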
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index f35ae2c88091..9e80899546d9 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2802,7 +2802,7 @@ port_err:
static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
struct mv643xx_eth_shared_platform_data *pd;
- struct device_node *pnp, *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node;
int ret;
/* bail out if not registered from DT */
@@ -2816,10 +2816,9 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);
- for_each_available_child_of_node(np, pnp) {
+ for_each_available_child_of_node_scoped(np, pnp) {
ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
if (ret) {
- of_node_put(pnp);
mv643xx_eth_shared_of_remove();
return ret;
}
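
for_each_available_child_of_node_scoped() drops the child's reference automatically when the loop variable leaves scope, which is why the explicit of_node_put() on the error path can go away. A minimal model of scope-bound cleanup using the GCC/Clang cleanup attribute, the same mechanism the kernel's scoped iterators rely on:

    #include <stdio.h>
    #include <stdlib.h>

    /* Cleanup handler: receives a pointer to the variable going out of
     * scope, mirroring how scoped OF iterators release node references.
     */
    static void free_buf(char **p)
    {
        free(*p);
        printf("released automatically\n");
    }

    int main(void)
    {
        for (int i = 0; i < 2; i++) {
            char *buf __attribute__((cleanup(free_buf))) = malloc(16);

            if (!buf)
                return 1;
            /* Early exits no longer leak: cleanup runs on scope exit. */
            if (i == 1)
                break;
        }
        return 0;
    }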
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 9190eff6c0bb..e1d003fdbc2e 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -104,7 +104,7 @@ static int orion_mdio_wait_ready(const struct orion_mdio_ops *ops,
return 0;
} else {
/* wait_event_timeout does not guarantee a delay of at
- * least one whole jiffie, so timeout must be no less
+ * least one whole jiffy, so timeout must be no less
* than two.
*/
timeout = max(usecs_to_jiffies(MVMDIO_SMI_TIMEOUT), 2);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 41894834fb53..d72b2d5f96db 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1781,7 +1781,7 @@ static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
}
/* Set TXQ descriptors fields relevant for CSUM calculation */
-static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvneta_txq_desc_csum(int l3_offs, __be16 l3_proto,
int ip_hdr_len, int l4_proto)
{
u32 command;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index e809f91c08fb..9e02e4367bec 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -1088,7 +1088,7 @@ struct mvpp2 {
unsigned int max_port_rxqs;
/* Workqueue to gather hardware statistics */
- char queue_name[30];
+ char queue_name[31];
struct workqueue_struct *stats_queue;
/* Debugfs root entry */
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 40aeaa7bd739..1641791a2d5b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1522,29 +1522,19 @@ static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx)
return 0;
}
-int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx)
+int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 port_ctx)
{
u32 rss_ctx;
- int ret, i;
+ int ret;
ret = mvpp22_rss_context_create(port, &rss_ctx);
if (ret)
return ret;
- /* Find the first available context number in the port, starting from 1.
- * Context 0 on each port is reserved for the default context.
- */
- for (i = 1; i < MVPP22_N_RSS_TABLES; i++) {
- if (port->rss_ctx[i] < 0)
- break;
- }
-
- if (i == MVPP22_N_RSS_TABLES)
+ if (WARN_ON_ONCE(port->rss_ctx[port_ctx] >= 0))
return -EINVAL;
- port->rss_ctx[i] = rss_ctx;
- *port_ctx = i;
-
+ port->rss_ctx[port_ctx] = rss_ctx;
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 663157dc8062..85c9c6e80678 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -264,7 +264,7 @@ int mvpp22_port_rss_init(struct mvpp2_port *port);
int mvpp22_port_rss_enable(struct mvpp2_port *port);
int mvpp22_port_rss_disable(struct mvpp2_port *port);
-int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *rss_ctx);
+int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 rss_ctx);
int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 rss_ctx);
int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 0d62a33afa80..3880dcc0418b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5268,8 +5268,6 @@ static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -5696,40 +5694,82 @@ static int mvpp2_ethtool_get_rxfh(struct net_device *dev,
return ret;
}
-static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
- struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack)
+static bool mvpp2_ethtool_rxfh_okay(struct mvpp2_port *port,
+ const struct ethtool_rxfh_param *rxfh)
{
- struct mvpp2_port *port = netdev_priv(dev);
- u32 *rss_context = &rxfh->rss_context;
- int ret = 0;
-
if (!mvpp22_rss_is_supported(port))
- return -EOPNOTSUPP;
+ return false;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_CRC32)
- return -EOPNOTSUPP;
+ return false;
if (rxfh->key)
+ return false;
+
+ return true;
+}
+
+static int mvpp2_create_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp2_ethtool_rxfh_okay(port, rxfh))
return -EOPNOTSUPP;
- if (*rss_context && rxfh->rss_delete)
- return mvpp22_port_rss_ctx_delete(port, *rss_context);
+ ctx->hfunc = ETH_RSS_HASH_CRC32;
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- ret = mvpp22_port_rss_ctx_create(port, rss_context);
- if (ret)
- return ret;
- }
+ ret = mvpp22_port_rss_ctx_create(port, rxfh->rss_context);
+ if (ret)
+ return ret;
- if (rxfh->indir)
- ret = mvpp22_port_rss_ctx_indir_set(port, *rss_context,
+ if (!rxfh->indir)
+ ret = mvpp22_port_rss_ctx_indir_get(port, rxfh->rss_context,
+ ethtool_rxfh_context_indir(ctx));
+ else
+ ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context,
rxfh->indir);
+ return ret;
+}
+static int mvpp2_modify_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp2_ethtool_rxfh_okay(port, rxfh))
+ return -EOPNOTSUPP;
+
+ if (rxfh->indir)
+ ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context,
+ rxfh->indir);
return ret;
}
+static int mvpp2_remove_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ return mvpp22_port_rss_ctx_delete(port, rss_context);
+}
+
+static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack);
+}
+
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -5749,7 +5789,7 @@ static const struct net_device_ops mvpp2_netdev_ops = {
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
- .cap_rss_ctx_supported = true,
+ .rxfh_max_num_contexts = MVPP22_N_RSS_TABLES,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = mvpp2_ethtool_nway_reset,
@@ -5772,6 +5812,9 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
.get_rxfh = mvpp2_ethtool_get_rxfh,
.set_rxfh = mvpp2_ethtool_set_rxfh,
+ .create_rxfh_context = mvpp2_create_rxfh_context,
+ .modify_rxfh_context = mvpp2_modify_rxfh_context,
+ .remove_rxfh_context = mvpp2_remove_rxfh_context,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -7417,8 +7460,6 @@ static int mvpp2_get_sram(struct platform_device *pdev,
static int mvpp2_probe(struct platform_device *pdev)
{
- struct fwnode_handle *fwnode = pdev->dev.fwnode;
- struct fwnode_handle *port_fwnode;
struct mvpp2 *priv;
struct resource *res;
void __iomem *base;
@@ -7591,7 +7632,7 @@ static int mvpp2_probe(struct platform_device *pdev)
}
/* Map DTS-active ports. Should be done before FIFO mvpp2_init */
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ device_for_each_child_node_scoped(&pdev->dev, port_fwnode) {
if (!fwnode_property_read_u32(port_fwnode, "port-id", &i))
priv->port_map |= BIT(i);
}
@@ -7614,7 +7655,7 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_axi_clk;
/* Initialize ports */
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ device_for_each_child_node_scoped(&pdev->dev, port_fwnode) {
err = mvpp2_port_probe(pdev, port_fwnode, priv);
if (err < 0)
goto err_port_probe;
@@ -7653,14 +7694,8 @@ static int mvpp2_probe(struct platform_device *pdev)
return 0;
err_port_probe:
- fwnode_handle_put(port_fwnode);
-
- i = 0;
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
- if (priv->port_list[i])
- mvpp2_port_remove(priv->port_list[i]);
- i++;
- }
+ for (i = 0; i < priv->port_count; i++)
+ mvpp2_port_remove(priv->port_list[i]);
err_axi_clk:
clk_disable_unprepare(priv->axi_clk);
err_mg_core_clk:
@@ -7677,18 +7712,13 @@ err_pp_clk:
static void mvpp2_remove(struct platform_device *pdev)
{
struct mvpp2 *priv = platform_get_drvdata(pdev);
- struct fwnode_handle *fwnode = pdev->dev.fwnode;
- int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
- struct fwnode_handle *port_fwnode;
+ int i, poolnum = MVPP2_BM_POOLS_NUM;
mvpp2_dbgfs_cleanup(priv);
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
- if (priv->port_list[i]) {
- mutex_destroy(&priv->port_list[i]->gather_stats_lock);
- mvpp2_port_remove(priv->port_list[i]);
- }
- i++;
+ for (i = 0; i < priv->port_count; i++) {
+ mutex_destroy(&priv->port_list[i]->gather_stats_lock);
+ mvpp2_port_remove(priv->port_list[i]);
}
destroy_workqueue(priv->stats_queue);
@@ -7711,7 +7741,7 @@ static void mvpp2_remove(struct platform_device *pdev)
aggr_txq->descs_dma);
}
- if (is_acpi_node(port_fwnode))
+ if (!dev_of_node(&pdev->dev))
return;
clk_disable_unprepare(priv->axi_clk);
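
The mvpp2 conversion above splits the single set_rxfh entry point into dedicated create/modify/remove context callbacks and lets the ethtool core allocate the context ID, which is why mvpp22_port_rss_ctx_create() now takes a u32 value instead of a u32 pointer. A toy model of that ownership split (invented types, not the real ethtool structures; 8 stands in for MVPP22_N_RSS_TABLES):

    #include <stdio.h>

    #define N_RSS_TABLES 8 /* stands in for MVPP22_N_RSS_TABLES */

    static int table[N_RSS_TABLES];

    /* Shared validation, factored out as in mvpp2_ethtool_rxfh_okay(). */
    static int rxfh_okay(unsigned int ctx)
    {
        return ctx < N_RSS_TABLES;
    }

    static int create_ctx(unsigned int ctx)
    {
        if (!rxfh_okay(ctx) || table[ctx])
            return -1;
        table[ctx] = 1; /* core chose the ID; driver just claims the slot */
        return 0;
    }

    static int remove_ctx(unsigned int ctx)
    {
        if (!rxfh_okay(ctx) || !table[ctx])
            return -1;
        table[ctx] = 0;
        return 0;
    }

    int main(void)
    {
        int a = create_ctx(2);
        int b = remove_ctx(2);
        int c = remove_ctx(2); /* second remove fails: slot already free */

        printf("create=%d remove=%d again=%d\n", a, b, c);
        return 0;
    }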
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index ed2160cc5acb..6ea2f3071fe8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -1856,8 +1856,9 @@ struct cpt_flt_eng_info_req {
struct cpt_flt_eng_info_rsp {
struct mbox_msghdr hdr;
- u64 flt_eng_map[CPT_10K_AF_INT_VEC_RVU];
- u64 rcvrd_eng_map[CPT_10K_AF_INT_VEC_RVU];
+#define CPT_AF_MAX_FLT_INT_VECS 3
+ u64 flt_eng_map[CPT_AF_MAX_FLT_INT_VECS];
+ u64 rcvrd_eng_map[CPT_AF_MAX_FLT_INT_VECS];
u64 rsvd;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index ac7ee3f3598c..1a97fb9032fa 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2479,9 +2479,9 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
goto free_regions;
}
- mw->mbox_wq = alloc_workqueue(name,
+ mw->mbox_wq = alloc_workqueue("%s",
WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
- num);
+ num, name);
if (!mw->mbox_wq) {
err = -ENOMEM;
goto unmap_regions;
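
Passing a caller-influenced string as the format argument is the classic format-string hazard; the fix routes the name through a literal "%s" instead. The same pattern in a few lines of userspace C:

    #include <stdio.h>

    int main(void)
    {
        /* Same hazard the rvu change closes: a name containing '%' would
         * be interpreted as a conversion if passed as the format itself.
         */
        const char *name = "mbox_wq_100%";

        /* printf(name);        -- undefined behaviour, like the old call */
        printf("%s\n", name);   /* safe, mirrors alloc_workqueue("%s", ..., name) */
        return 0;
    }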
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 03ee93fd9e94..5016ba82e142 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -319,6 +319,7 @@ struct nix_mark_format {
/* smq(flush) to tl1 cir/pir info */
struct nix_smq_tree_ctx {
+ u16 schq;
u64 cir_off;
u64 cir_val;
u64 pir_off;
@@ -328,8 +329,6 @@ struct nix_smq_tree_ctx {
/* smq flush context */
struct nix_smq_flush_ctx {
int smq;
- u16 tl1_schq;
- u16 tl2_schq;
struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT];
};
@@ -400,6 +399,7 @@ struct hw_cap {
bool nix_multiple_dwrr_mtu; /* Multiple DWRR_MTU to choose from */
bool npc_hash_extract; /* Hash extract enabled ? */
bool npc_exact_match_enabled; /* Exact match supported ? */
+ bool cpt_rxc; /* Is CPT-RXC supported */
};
struct rvu_hwinfo {
@@ -690,6 +690,35 @@ static inline bool is_cnf10ka_a0(struct rvu *rvu)
return false;
}
+static inline bool is_cn10ka_a0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A &&
+ (pdev->revision & 0x0F) == 0x0)
+ return true;
+ return false;
+}
+
+static inline bool is_cn10ka_a1(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A &&
+ (pdev->revision & 0x0F) == 0x1)
+ return true;
+ return false;
+}
+
+static inline bool is_cn10kb(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
+ return true;
+ return false;
+}
+
static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu)
{
u64 npc_const3;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 3e09d2285814..3c5bbaf12e59 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -19,6 +19,12 @@
/* Length of initial context fetch in 128 byte words */
#define CPT_CTX_ILEN 1ULL
+/* Number of CPT RVU and RAS interrupt vectors */
+#define CPT_10K_AF_RVU_RAS_INT_VEC_CNT 2
+
+/* Default CPT_AF_RXC_CFG1:max_rxc_icb_cnt */
+#define CPT_DFLT_MAX_RXC_ICB_CNT 0xC0ULL
+
#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
({ \
u64 free_sts = 0, busy_sts = 0; \
@@ -37,6 +43,41 @@
(_rsp)->free_sts_##etype = free_sts; \
})
+#define MAX_AE GENMASK_ULL(47, 32)
+#define MAX_IE GENMASK_ULL(31, 16)
+#define MAX_SE GENMASK_ULL(15, 0)
+
+static u16 cpt_max_engines_get(struct rvu *rvu)
+{
+ u16 max_ses, max_ies, max_aes;
+ u64 reg;
+
+ reg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS1);
+ max_ses = FIELD_GET(MAX_SE, reg);
+ max_ies = FIELD_GET(MAX_IE, reg);
+ max_aes = FIELD_GET(MAX_AE, reg);
+
+ return max_ses + max_ies + max_aes;
+}
+
+/* The number of flt interrupt vectors depends on the number of engines the
+ * chip has. Each flt vector represents 64 engines.
+ */
+static int cpt_10k_flt_nvecs_get(struct rvu *rvu, u16 max_engs)
+{
+ int flt_vecs;
+
+ flt_vecs = DIV_ROUND_UP(max_engs, 64);
+
+ if (flt_vecs > CPT_10K_AF_INT_VEC_FLT_MAX) {
+ dev_warn_once(rvu->dev, "flt_vecs:%d exceeds the max vectors:%d\n",
+ flt_vecs, CPT_10K_AF_INT_VEC_FLT_MAX);
+ flt_vecs = CPT_10K_AF_INT_VEC_FLT_MAX;
+ }
+
+ return flt_vecs;
+}
+
static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr)
{
struct rvu_block *block = ptr;
@@ -150,17 +191,26 @@ static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
{
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
- int i;
+ int i, flt_vecs;
+ u16 max_engs;
+ u8 nr;
+
+ max_engs = cpt_max_engines_get(rvu);
+ flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs);
/* Disable all CPT AF interrupts */
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), ~0ULL);
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(1), ~0ULL);
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(2), 0xFFFF);
+ for (i = CPT_10K_AF_INT_VEC_FLT0; i < flt_vecs; i++) {
+ nr = (max_engs > 64) ? 64 : max_engs;
+ max_engs -= nr;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i),
+ INTR_MASK(nr));
+ }
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
- for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++)
+ /* CPT AF interrupt vectors are flt_int, rvu_int and ras_int. */
+ for (i = 0; i < flt_vecs + CPT_10K_AF_RVU_RAS_INT_VEC_CNT; i++)
if (rvu->irq_allocated[off + i]) {
free_irq(pci_irq_vector(rvu->pdev, off + i), block);
rvu->irq_allocated[off + i] = false;
@@ -206,12 +256,18 @@ void rvu_cpt_unregister_interrupts(struct rvu *rvu)
static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
{
+ int rvu_intr_vec, ras_intr_vec;
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
irq_handler_t flt_fn;
- int i, ret;
+ int i, ret, flt_vecs;
+ u16 max_engs;
+ u8 nr;
+
+ max_engs = cpt_max_engines_get(rvu);
+ flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs);
- for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
+ for (i = CPT_10K_AF_INT_VEC_FLT0; i < flt_vecs; i++) {
sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
switch (i) {
@@ -229,20 +285,24 @@ static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]);
if (ret)
goto err;
- if (i == CPT_10K_AF_INT_VEC_FLT2)
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0xFFFF);
- else
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
+
+ nr = (max_engs > 64) ? 64 : max_engs;
+ max_engs -= nr;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i),
+ INTR_MASK(nr));
}
- ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
+ rvu_intr_vec = flt_vecs;
+ ras_intr_vec = rvu_intr_vec + 1;
+
+ ret = rvu_cpt_do_register_interrupt(block, off + rvu_intr_vec,
rvu_cpt_af_rvu_intr_handler,
"CPTAF RVU");
if (ret)
goto err;
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
- ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS,
+ ret = rvu_cpt_do_register_interrupt(block, off + ras_intr_vec,
rvu_cpt_af_ras_intr_handler,
"CPTAF RAS");
if (ret)
@@ -632,7 +692,9 @@ int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
return ret;
}
-static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
+static bool validate_and_update_reg_offset(struct rvu *rvu,
+ struct cpt_rd_wr_reg_msg *req,
+ u64 *reg_offset)
{
u64 offset = req->reg_offset;
int blkaddr, num_lfs, lf;
@@ -663,6 +725,11 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
if (lf < 0)
return false;
+ /* Translate local LF's offset to global CPT LF's offset to
+ * access LFX register.
+ */
+ *reg_offset = (req->reg_offset & 0xFF000) + (lf << 3);
+
return true;
} else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
/* Registers that can be accessed from PF */
@@ -673,6 +740,7 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
case CPT_AF_BLK_RST:
case CPT_AF_CONSTANTS1:
case CPT_AF_CTX_FLUSH_TIMER:
+ case CPT_AF_RXC_CFG1:
return true;
}
@@ -697,7 +765,7 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
struct cpt_rd_wr_reg_msg *rsp)
{
u64 offset = req->reg_offset;
- int blkaddr, lf;
+ int blkaddr;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
if (blkaddr < 0)
@@ -708,18 +776,10 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
!is_cpt_vf(rvu, req->hdr.pcifunc))
return CPT_AF_ERR_ACCESS_DENIED;
- if (!is_valid_offset(rvu, req))
+ if (!validate_and_update_reg_offset(rvu, req, &offset))
return CPT_AF_ERR_ACCESS_DENIED;
- /* Translate local LF used by VFs to global CPT LF */
- lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], req->hdr.pcifunc,
- (offset & 0xFFF) >> 3);
-
- /* Translate local LF's offset to global CPT LF's offset */
- offset &= 0xFF000;
- offset += lf << 3;
-
- rsp->reg_offset = offset;
+ rsp->reg_offset = req->reg_offset;
rsp->ret_val = req->ret_val;
rsp->is_write = req->is_write;
@@ -733,6 +793,8 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
{
+ struct rvu_hwinfo *hw = rvu->hw;
+
if (is_rvu_otx2(rvu))
return;
@@ -756,14 +818,16 @@ static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR);
rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID);
rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER);
+ rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
+ rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
+ if (!hw->cap.cpt_rxc)
+ return;
rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME);
rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
- rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
- rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
}
static void get_eng_sts(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
@@ -922,13 +986,17 @@ int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_r
struct rvu_block *block;
unsigned long flags;
int blkaddr, vec;
+ int flt_vecs;
+ u16 max_engs;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
if (blkaddr < 0)
return blkaddr;
block = &rvu->hw->block[blkaddr];
- for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) {
+ max_engs = cpt_max_engines_get(rvu);
+ flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs);
+ for (vec = 0; vec < flt_vecs; vec++) {
spin_lock_irqsave(&rvu->cpt_intr_lock, flags);
rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec];
rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec];
@@ -944,10 +1012,11 @@ int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_r
static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
{
struct cpt_rxc_time_cfg_req req, prev;
+ struct rvu_hwinfo *hw = rvu->hw;
int timeout = 2000;
u64 reg;
- if (is_rvu_otx2(rvu))
+ if (!hw->cap.cpt_rxc)
return;
/* Set time limit to minimum values, so that rxc entries will be
@@ -1220,10 +1289,30 @@ unlock:
return 0;
}
+#define MAX_RXC_ICB_CNT GENMASK_ULL(40, 32)
+
int rvu_cpt_init(struct rvu *rvu)
{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 reg_val;
+
/* Retrieve CPT PF number */
rvu->cpt_pf_num = get_cpt_pf_num(rvu);
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT0) && !is_rvu_otx2(rvu) &&
+ !is_cn10kb(rvu))
+ hw->cap.cpt_rxc = true;
+
+ if (hw->cap.cpt_rxc && !is_cn10ka_a0(rvu) && !is_cn10ka_a1(rvu)) {
+ /* Set CPT_AF_RXC_CFG1:max_rxc_icb_cnt to 0xc0 so as not to affect
+ * inline inbound peak performance.
+ */
+ reg_val = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_RXC_CFG1);
+ reg_val &= ~MAX_RXC_ICB_CNT;
+ reg_val |= FIELD_PREP(MAX_RXC_ICB_CNT,
+ CPT_DFLT_MAX_RXC_ICB_CNT);
+ rvu_write64(rvu, BLKADDR_CPT0, CPT_AF_RXC_CFG1, reg_val);
+ }
+
spin_lock_init(&rvu->cpt_intr_lock);
return 0;
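
The CPT changes above size the fault-interrupt vectors from the engine count instead of hardcoding three vectors and a 0xFFFF mask for the last one. A standalone sketch of the per-vector mask computation; the INTR_MASK() shape below follows the usual all-ones-below-nr convention (the real macro lives in the rvu headers), and 144 is a hypothetical engine count:

    #include <stdint.h>
    #include <stdio.h>

    /* Engines are split into 64-wide groups; each flt vector gets a mask
     * covering only the engines it actually owns.
     */
    static uint64_t intr_mask(unsigned int nr)
    {
        return nr >= 64 ? ~0ULL : (1ULL << nr) - 1;
    }

    int main(void)
    {
        unsigned int max_engs = 144;                  /* hypothetical */
        unsigned int flt_vecs = (max_engs + 63) / 64; /* DIV_ROUND_UP */

        for (unsigned int i = 0; i < flt_vecs; i++) {
            unsigned int nr = max_engs > 64 ? 64 : max_engs;

            max_engs -= nr;
            printf("FLT%u mask = 0x%016llx\n", i,
                   (unsigned long long)intr_mask(nr));
        }
        return 0;
    }

With 144 engines this prints two full masks and 0xffff for the third vector, matching the constants the old code hardcoded for the FLT2 case.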
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 4a4ef5bd9e0b..87ba77e5026a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -838,10 +838,10 @@ RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
{
+ char cgx[10], lmac[10], chan[10];
struct rvu *rvu = filp->private;
struct pci_dev *pdev = NULL;
struct mac_ops *mac_ops;
- char cgx[10], lmac[10];
struct rvu_pfvf *pfvf;
int pf, domain, blkid;
u8 cgx_id, lmac_id;
@@ -852,7 +852,7 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
/* There can be no CGX devices at all */
if (!mac_ops)
return 0;
- seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
+ seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\tCHAN\n",
mac_ops->name);
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
if (!is_pf_cgxmapped(rvu, pf))
@@ -876,8 +876,11 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
&lmac_id);
sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
sprintf(lmac, "LMAC%d", lmac_id);
- seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
- dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
+ sprintf(chan, "%d",
+ rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0));
+ seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\t%s\n",
+ dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac,
+ chan);
pci_dev_put(pdev);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 222f9e00b836..da69350c6f76 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2259,14 +2259,13 @@ static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
schq = smq;
for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
+ smq_tree_ctx->schq = schq;
if (lvl == NIX_TXSCH_LVL_TL1) {
- smq_flush_ctx->tl1_schq = schq;
smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
smq_tree_ctx->pir_off = 0;
smq_tree_ctx->pir_val = 0;
parent_off = 0;
} else if (lvl == NIX_TXSCH_LVL_TL2) {
- smq_flush_ctx->tl2_schq = schq;
smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
parent_off = NIX_AF_TL2X_PARENT(schq);
@@ -2301,8 +2300,8 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
{
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
+ int tl2, tl2_schq;
u64 regoff;
- int tl2;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
@@ -2310,16 +2309,17 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
/* loop through all TL2s with matching PF_FUNC */
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
/* skip the smq(flush) TL2 */
- if (tl2 == smq_flush_ctx->tl2_schq)
+ if (tl2 == tl2_schq)
continue;
/* skip unused TL2s */
if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
continue;
/* skip if PF_FUNC doesn't match */
if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
- (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
+ (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] &
~RVU_PFVF_FUNC_MASK)))
continue;
/* enable/disable XOFF */
@@ -2361,10 +2361,12 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
int smq, u16 pcifunc, int nixlf)
{
struct nix_smq_flush_ctx *smq_flush_ctx;
+ int err, restore_tx_en = 0, i;
int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0;
- int err, restore_tx_en = 0;
- u64 cfg;
+ u16 tl2_tl3_link_schq;
+ u8 link, link_level;
+ u64 cfg, bmap = 0;
if (!is_rvu_otx2(rvu)) {
/* Skip SMQ flush if pkt count is zero */
@@ -2388,16 +2390,38 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
- /* Do SMQ flush and set enqueue xoff */
- cfg |= BIT_ULL(50) | BIT_ULL(49);
- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
-
/* Disable backpressure from physical link,
* otherwise SMQ flush may stall.
*/
rvu_cgx_enadis_rx_bp(rvu, pf, false);
+ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
+ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+ tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
+ link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
+
+ /* Set SMQ enqueue xoff */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+ cfg |= BIT_ULL(50);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+
+ /* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */
+ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
+ if (!(cfg & BIT_ULL(12)))
+ continue;
+ bmap |= BIT_ULL(i);
+ cfg &= ~BIT_ULL(12);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
+ }
+
+ /* Do SMQ flush and set enqueue xoff */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+ cfg |= BIT_ULL(50) | BIT_ULL(49);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+
/* Wait for flush to complete */
err = rvu_poll_reg(rvu, blkaddr,
NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
@@ -2406,6 +2430,17 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
"NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
nixlf, smq);
+ /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
+ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
+ if (!(bmap & BIT_ULL(i)))
+ continue;
+ cfg = rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
+ cfg |= BIT_ULL(12);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
+ }
+
/* clear XOFF on TL2s */
nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
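
The nix_smq_flush() change wraps the flush in a save/clear/restore of the per-link enable bits, so only links that were actually enabled get re-enabled afterwards. A toy model of that bitmap pattern (registers faked with an array; bit 12 mirrors the ENA bit position used in the hunk):

    #include <stdint.h>
    #include <stdio.h>

    #define ENA     (1ULL << 12)
    #define N_LINKS 4

    int main(void)
    {
        uint64_t cfg[N_LINKS] = { ENA, 0, ENA, 0 };
        uint64_t bmap = 0;

        for (int i = 0; i < N_LINKS; i++) {
            if (!(cfg[i] & ENA))
                continue;
            bmap |= 1ULL << i;   /* remember it was enabled */
            cfg[i] &= ~ENA;      /* disable for the flush */
        }

        /* ... the SMQ flush would run here ... */

        for (int i = 0; i < N_LINKS; i++)
            if (bmap & (1ULL << i))
                cfg[i] |= ENA;   /* restore only what we cleared */

        for (int i = 0; i < N_LINKS; i++)
            printf("link%d ena=%d\n", i, !!(cfg[i] & ENA));
        return 0;
    }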
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index d56be5fb7eb4..2b299fa85159 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -545,6 +545,7 @@
#define CPT_AF_CTX_PSH_PC (0x49450ull)
#define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull)
#define CPT_AF_CTX_CAM_DATA(a) (0x49800ull | (u64)(a) << 3)
+#define CPT_AF_RXC_CFG1 (0x50000ull)
#define CPT_AF_RXC_TIME (0x50010ull)
#define CPT_AF_RXC_TIME_CFG (0x50018ull)
#define CPT_AF_RXC_DFRG (0x50020ull)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 5ef406c7e8a4..fc8da2090657 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -71,13 +71,11 @@ enum cpt_af_int_vec_e {
CPT_AF_INT_VEC_CNT = 0x4,
};
-enum cpt_10k_af_int_vec_e {
+enum cpt_cn10k_flt_int_vec_e {
CPT_10K_AF_INT_VEC_FLT0 = 0x0,
CPT_10K_AF_INT_VEC_FLT1 = 0x1,
CPT_10K_AF_INT_VEC_FLT2 = 0x2,
- CPT_10K_AF_INT_VEC_RVU = 0x3,
- CPT_10K_AF_INT_VEC_RAS = 0x4,
- CPT_10K_AF_INT_VEC_CNT = 0x5,
+ CPT_10K_AF_INT_VEC_FLT_MAX = 0x3,
};
/* NPA Admin function Interrupt Vector Enumeration */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 0db62eb0dab3..32468c663605 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -962,8 +962,6 @@ static int otx2_get_ts_info(struct net_device *netdev,
return ethtool_op_get_ts_info(netdev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 3eb85949677a..933e18ba2fb2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -687,7 +687,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
__be16 l3_proto = vlan_get_protocol(skb);
struct udphdr *udph = udp_hdr(skb);
- u16 iplen;
+ __be16 iplen;
ext->lso_sb = skb_transport_offset(skb) +
sizeof(struct udphdr);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 63ae01954dfc..22ca6ee9665e 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -633,7 +633,8 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
if (err)
goto err_dl_port_register;
- dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
+ dev->features |= NETIF_F_HW_TC;
+ dev->netns_local = true;
dev->netdev_ops = &prestera_netdev_ops;
dev->ethtool_ops = &prestera_ethtool_ops;
SET_NETDEV_DEV(dev, sw->dev->dev);
diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index 1c5b85a86df1..2c26eb185283 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -18,6 +18,7 @@
#include <uapi/linux/ppp_defs.h>
#define AIROHA_MAX_NUM_GDM_PORTS 1
+#define AIROHA_MAX_NUM_QDMA 2
#define AIROHA_MAX_NUM_RSTS 3
#define AIROHA_MAX_NUM_XSI_RSTS 5
#define AIROHA_MAX_MTU 2000
@@ -66,9 +67,11 @@
#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
#define FE_RST_CORE_MASK BIT(0)
+#define REG_FE_WAN_MAC_H 0x0030
#define REG_FE_LAN_MAC_H 0x0040
-#define REG_FE_LAN_MAC_LMIN 0x0044
-#define REG_FE_LAN_MAC_LMAX 0x0048
+
+#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
+#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
#define REG_FE_CDM1_OQ_MAP0 0x0050
#define REG_FE_CDM1_OQ_MAP1 0x0054
@@ -727,7 +730,7 @@ struct airoha_queue_entry {
};
struct airoha_queue {
- struct airoha_eth *eth;
+ struct airoha_qdma *qdma;
/* protect concurrent queue accesses */
spinlock_t lock;
@@ -746,7 +749,7 @@ struct airoha_queue {
};
struct airoha_tx_irq_queue {
- struct airoha_eth *eth;
+ struct airoha_qdma *qdma;
struct napi_struct napi;
u32 *q;
@@ -782,9 +785,30 @@ struct airoha_hw_stats {
u64 rx_len[7];
};
+struct airoha_qdma {
+ struct airoha_eth *eth;
+ void __iomem *regs;
+
+ /* protect concurrent irqmask accesses */
+ spinlock_t irq_lock;
+ u32 irqmask[QDMA_INT_REG_MAX];
+ int irq;
+
+ struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+ struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+ struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+
+ /* descriptor and packet buffers for qdma hw forward */
+ struct {
+ void *desc;
+ void *q;
+ } hfwd;
+};
+
struct airoha_gdm_port {
+ struct airoha_qdma *qdma;
struct net_device *dev;
- struct airoha_eth *eth;
int id;
struct airoha_hw_stats stats;
@@ -794,31 +818,15 @@ struct airoha_eth {
struct device *dev;
unsigned long state;
-
- void __iomem *qdma_regs;
void __iomem *fe_regs;
- /* protect concurrent irqmask accesses */
- spinlock_t irq_lock;
- u32 irqmask[QDMA_INT_REG_MAX];
- int irq;
-
struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
- struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
-
struct net_device *napi_dev;
- struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
- struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
-
- struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
- /* descriptor and packet buffers for qdma hw forward */
- struct {
- void *desc;
- void *q;
- } hfwd;
+ struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
+ struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
};
static u32 airoha_rr(void __iomem *base, u32 offset)
@@ -850,60 +858,72 @@ static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
#define airoha_fe_clear(eth, offset, val) \
airoha_rmw((eth)->fe_regs, (offset), (val), 0)
-#define airoha_qdma_rr(eth, offset) \
- airoha_rr((eth)->qdma_regs, (offset))
-#define airoha_qdma_wr(eth, offset, val) \
- airoha_wr((eth)->qdma_regs, (offset), (val))
-#define airoha_qdma_rmw(eth, offset, mask, val) \
- airoha_rmw((eth)->qdma_regs, (offset), (mask), (val))
-#define airoha_qdma_set(eth, offset, val) \
- airoha_rmw((eth)->qdma_regs, (offset), 0, (val))
-#define airoha_qdma_clear(eth, offset, val) \
- airoha_rmw((eth)->qdma_regs, (offset), (val), 0)
-
-static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index,
+#define airoha_qdma_rr(qdma, offset) \
+ airoha_rr((qdma)->regs, (offset))
+#define airoha_qdma_wr(qdma, offset, val) \
+ airoha_wr((qdma)->regs, (offset), (val))
+#define airoha_qdma_rmw(qdma, offset, mask, val) \
+ airoha_rmw((qdma)->regs, (offset), (mask), (val))
+#define airoha_qdma_set(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), 0, (val))
+#define airoha_qdma_clear(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), (val), 0)
+
+static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
u32 clear, u32 set)
{
unsigned long flags;
- if (WARN_ON_ONCE(index >= ARRAY_SIZE(eth->irqmask)))
+ if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
return;
- spin_lock_irqsave(&eth->irq_lock, flags);
+ spin_lock_irqsave(&qdma->irq_lock, flags);
- eth->irqmask[index] &= ~clear;
- eth->irqmask[index] |= set;
- airoha_qdma_wr(eth, REG_INT_ENABLE(index), eth->irqmask[index]);
+ qdma->irqmask[index] &= ~clear;
+ qdma->irqmask[index] |= set;
+ airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
/* Read irq_enable register in order to guarantee the update above
* completes in the spinlock critical section.
*/
- airoha_qdma_rr(eth, REG_INT_ENABLE(index));
+ airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
- spin_unlock_irqrestore(&eth->irq_lock, flags);
+ spin_unlock_irqrestore(&qdma->irq_lock, flags);
}
-static void airoha_qdma_irq_enable(struct airoha_eth *eth, int index,
+static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
u32 mask)
{
- airoha_qdma_set_irqmask(eth, index, 0, mask);
+ airoha_qdma_set_irqmask(qdma, index, 0, mask);
}
-static void airoha_qdma_irq_disable(struct airoha_eth *eth, int index,
+static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
u32 mask)
{
- airoha_qdma_set_irqmask(eth, index, mask, 0);
+ airoha_qdma_set_irqmask(qdma, index, mask, 0);
}
-static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr)
+static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
{
- u32 val;
+ /* The GDM1 port on the EN7581 SoC is connected to the LAN DSA switch.
+ * GDM{2,3,4} can be used as WAN ports connected to an external
+ * PHY module.
+ */
+ return port->id == 1;
+}
+
+static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 val, reg;
+ reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
+ : REG_FE_WAN_MAC_H;
val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
- airoha_fe_wr(eth, REG_FE_LAN_MAC_H, val);
+ airoha_fe_wr(eth, reg, val);
val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
- airoha_fe_wr(eth, REG_FE_LAN_MAC_LMIN, val);
- airoha_fe_wr(eth, REG_FE_LAN_MAC_LMAX, val);
+ airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
+ airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
}
static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
@@ -1383,8 +1403,9 @@ static int airoha_fe_init(struct airoha_eth *eth)
static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
{
enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
- struct airoha_eth *eth = q->eth;
- int qid = q - &eth->q_rx[0];
+ struct airoha_qdma *qdma = q->qdma;
+ struct airoha_eth *eth = qdma->eth;
+ int qid = q - &qdma->q_rx[0];
int nframes = 0;
while (q->queued < q->ndesc - 1) {
@@ -1420,7 +1441,8 @@ static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
WRITE_ONCE(desc->msg2, 0);
WRITE_ONCE(desc->msg3, 0);
- airoha_qdma_rmw(eth, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
+ airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
+ RX_RING_CPU_IDX_MASK,
FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
}
@@ -1450,8 +1472,9 @@ static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
{
enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
- struct airoha_eth *eth = q->eth;
- int qid = q - &eth->q_rx[0];
+ struct airoha_qdma *qdma = q->qdma;
+ struct airoha_eth *eth = qdma->eth;
+ int qid = q - &qdma->q_rx[0];
int done = 0;
while (done < budget) {
@@ -1513,7 +1536,6 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
{
struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
- struct airoha_eth *eth = q->eth;
int cur, done = 0;
do {
@@ -1522,14 +1544,14 @@ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
} while (cur && done < budget);
if (done < budget && napi_complete(napi))
- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1,
+ airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
RX_DONE_INT_MASK);
return done;
}
-static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
- struct airoha_queue *q, int ndesc)
+static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
+ struct airoha_qdma *qdma, int ndesc)
{
const struct page_pool_params pp_params = {
.order = 0,
@@ -1538,15 +1560,16 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
.dma_dir = DMA_FROM_DEVICE,
.max_len = PAGE_SIZE,
.nid = NUMA_NO_NODE,
- .dev = eth->dev,
+ .dev = qdma->eth->dev,
.napi = &q->napi,
};
- int qid = q - &eth->q_rx[0], thr;
+ struct airoha_eth *eth = qdma->eth;
+ int qid = q - &qdma->q_rx[0], thr;
dma_addr_t dma_addr;
q->buf_size = PAGE_SIZE / 2;
q->ndesc = ndesc;
- q->eth = eth;
+ q->qdma = qdma;
q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
GFP_KERNEL);
@@ -1568,14 +1591,15 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
- airoha_qdma_wr(eth, REG_RX_RING_BASE(qid), dma_addr);
- airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_SIZE_MASK,
+ airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
+ airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
+ RX_RING_SIZE_MASK,
FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
thr = clamp(ndesc >> 3, 1, 32);
- airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
+ airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
FIELD_PREP(RX_RING_THR_MASK, thr));
- airoha_qdma_rmw(eth, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
+ airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
airoha_qdma_fill_rx_queue(q);
@@ -1585,7 +1609,7 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
{
- struct airoha_eth *eth = q->eth;
+ struct airoha_eth *eth = q->qdma->eth;
while (q->queued) {
struct airoha_queue_entry *e = &q->entry[q->tail];
@@ -1599,11 +1623,11 @@ static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
}
}
-static int airoha_qdma_init_rx(struct airoha_eth *eth)
+static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
{
int i;
- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
int err;
if (!(RX_DONE_INT_MASK & BIT(i))) {
@@ -1611,7 +1635,7 @@ static int airoha_qdma_init_rx(struct airoha_eth *eth)
continue;
}
- err = airoha_qdma_init_rx_queue(eth, &eth->q_rx[i],
+ err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
RX_DSCP_NUM(i));
if (err)
return err;
@@ -1623,12 +1647,14 @@ static int airoha_qdma_init_rx(struct airoha_eth *eth)
static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
{
struct airoha_tx_irq_queue *irq_q;
+ struct airoha_qdma *qdma;
struct airoha_eth *eth;
int id, done = 0;
irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
- eth = irq_q->eth;
- id = irq_q - &eth->q_tx_irq[0];
+ qdma = irq_q->qdma;
+ id = irq_q - &qdma->q_tx_irq[0];
+ eth = qdma->eth;
while (irq_q->queued > 0 && done < budget) {
u32 qid, last, val = irq_q->q[irq_q->head];
@@ -1645,10 +1671,10 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
- if (qid >= ARRAY_SIZE(eth->q_tx))
+ if (qid >= ARRAY_SIZE(qdma->q_tx))
continue;
- q = &eth->q_tx[qid];
+ q = &qdma->q_tx[qid];
if (!q->ndesc)
continue;
@@ -1697,28 +1723,29 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
int i, len = done >> 7;
for (i = 0; i < len; i++)
- airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
+ airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
IRQ_CLEAR_LEN_MASK, 0x80);
- airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
+ airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
IRQ_CLEAR_LEN_MASK, (done & 0x7f));
}
if (done < budget && napi_complete(napi))
- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0,
+ airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(id));
return done;
}
-static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
- struct airoha_queue *q, int size)
+static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
+ struct airoha_qdma *qdma, int size)
{
- int i, qid = q - &eth->q_tx[0];
+ struct airoha_eth *eth = qdma->eth;
+ int i, qid = q - &qdma->q_tx[0];
dma_addr_t dma_addr;
spin_lock_init(&q->lock);
q->ndesc = size;
- q->eth = eth;
+ q->qdma = qdma;
q->free_thr = 1 + MAX_SKB_FRAGS;
q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
@@ -1738,20 +1765,20 @@ static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
}
- airoha_qdma_wr(eth, REG_TX_RING_BASE(qid), dma_addr);
- airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+ airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
+ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
- airoha_qdma_rmw(eth, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
+ airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
return 0;
}
-static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
- struct airoha_tx_irq_queue *irq_q,
- int size)
+static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
+ struct airoha_qdma *qdma, int size)
{
- int id = irq_q - &eth->q_tx_irq[0];
+ int id = irq_q - &qdma->q_tx_irq[0];
+ struct airoha_eth *eth = qdma->eth;
dma_addr_t dma_addr;
netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
@@ -1763,30 +1790,30 @@ static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
memset(irq_q->q, 0xff, size * sizeof(u32));
irq_q->size = size;
- irq_q->eth = eth;
+ irq_q->qdma = qdma;
- airoha_qdma_wr(eth, REG_TX_IRQ_BASE(id), dma_addr);
- airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
+ airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
+ airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
- airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
+ airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
FIELD_PREP(TX_IRQ_THR_MASK, 1));
return 0;
}
-static int airoha_qdma_init_tx(struct airoha_eth *eth)
+static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
{
int i, err;
- for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
- err = airoha_qdma_tx_irq_init(eth, &eth->q_tx_irq[i],
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+ err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
IRQ_QUEUE_LEN(i));
if (err)
return err;
}
- for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
- err = airoha_qdma_init_tx_queue(eth, &eth->q_tx[i],
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
TX_DSCP_NUM);
if (err)
return err;
@@ -1797,7 +1824,7 @@ static int airoha_qdma_init_tx(struct airoha_eth *eth)
static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
{
- struct airoha_eth *eth = q->eth;
+ struct airoha_eth *eth = q->qdma->eth;
spin_lock_bh(&q->lock);
while (q->queued) {
@@ -1814,34 +1841,35 @@ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
spin_unlock_bh(&q->lock);
}
-static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth)
+static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
{
+ struct airoha_eth *eth = qdma->eth;
dma_addr_t dma_addr;
u32 status;
int size;
size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
- eth->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
- GFP_KERNEL);
- if (!eth->hfwd.desc)
+ qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL);
+ if (!qdma->hfwd.desc)
return -ENOMEM;
- airoha_qdma_wr(eth, REG_FWD_DSCP_BASE, dma_addr);
+ airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
- eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
- GFP_KERNEL);
- if (!eth->hfwd.q)
+ qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL);
+ if (!qdma->hfwd.q)
return -ENOMEM;
- airoha_qdma_wr(eth, REG_FWD_BUF_BASE, dma_addr);
+ airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
- airoha_qdma_rmw(eth, REG_HW_FWD_DSCP_CFG,
+ airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
- airoha_qdma_rmw(eth, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
+ airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
- airoha_qdma_rmw(eth, REG_LMGR_INIT_CFG,
+ airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
HW_FWD_DESC_NUM_MASK,
FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
@@ -1849,87 +1877,87 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth)
return read_poll_timeout(airoha_qdma_rr, status,
!(status & LMGR_INIT_START), USEC_PER_MSEC,
- 30 * USEC_PER_MSEC, true, eth,
+ 30 * USEC_PER_MSEC, true, qdma,
REG_LMGR_INIT_CFG);
}
-static void airoha_qdma_init_qos(struct airoha_eth *eth)
+static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
{
- airoha_qdma_clear(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
- airoha_qdma_set(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
+ airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
+ airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
- airoha_qdma_clear(eth, REG_PSE_BUF_USAGE_CFG,
+ airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
PSE_BUF_ESTIMATE_EN_MASK);
- airoha_qdma_set(eth, REG_EGRESS_RATE_METER_CFG,
+ airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
EGRESS_RATE_METER_EN_MASK |
EGRESS_RATE_METER_EQ_RATE_EN_MASK);
/* 2047us x 31 = 63.457ms */
- airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
+ airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
EGRESS_RATE_METER_WINDOW_SZ_MASK,
FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
- airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
+ airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
EGRESS_RATE_METER_TIMESLICE_MASK,
FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
/* ratelimit init */
- airoha_qdma_set(eth, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
+ airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
/* fast-tick 25us */
- airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
+ airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
FIELD_PREP(GLB_FAST_TICK_MASK, 25));
- airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
+ airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
- airoha_qdma_set(eth, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
- airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
+ airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
+ airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
- airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG,
+ airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
EGRESS_SLOW_TICK_RATIO_MASK,
FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
- airoha_qdma_set(eth, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
- airoha_qdma_clear(eth, REG_INGRESS_TRTCM_CFG,
+ airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
+ airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
INGRESS_TRTCM_MODE_MASK);
- airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
+ airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
- airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG,
+ airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
INGRESS_SLOW_TICK_RATIO_MASK,
FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
- airoha_qdma_set(eth, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
- airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
+ airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
+ airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
FIELD_PREP(SLA_FAST_TICK_MASK, 25));
- airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
+ airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
}
-static int airoha_qdma_hw_init(struct airoha_eth *eth)
+static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
int i;
/* clear pending irqs */
- for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++)
- airoha_qdma_wr(eth, REG_INT_STATUS(i), 0xffffffff);
+ for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
+ airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
/* setup irqs */
- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
+ airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
+ airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
+ airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
/* setup irq binding */
- for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
- if (!eth->q_tx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
continue;
if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
- airoha_qdma_set(eth, REG_TX_RING_BLOCKING(i),
+ airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
TX_RING_IRQ_BLOCKING_CFG_MASK);
else
- airoha_qdma_clear(eth, REG_TX_RING_BLOCKING(i),
+ airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
TX_RING_IRQ_BLOCKING_CFG_MASK);
}
- airoha_qdma_wr(eth, REG_QDMA_GLOBAL_CFG,
+ airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
GLOBAL_CFG_RX_2B_OFFSET_MASK |
FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
GLOBAL_CFG_CPU_TXR_RR_MASK |
@@ -1940,18 +1968,18 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth)
GLOBAL_CFG_TX_WB_DONE_MASK |
FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
- airoha_qdma_init_qos(eth);
+ airoha_qdma_init_qos(qdma);
/* disable qdma rx delay interrupt */
- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
- if (!eth->q_rx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
continue;
- airoha_qdma_clear(eth, REG_RX_DELAY_INT_IDX(i),
+ airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
RX_DELAY_INT_MASK);
}
- airoha_qdma_set(eth, REG_TXQ_CNGST_CFG,
+ airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
return 0;
@@ -1959,150 +1987,180 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth)
static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
{
- struct airoha_eth *eth = dev_instance;
- u32 intr[ARRAY_SIZE(eth->irqmask)];
+ struct airoha_qdma *qdma = dev_instance;
+ u32 intr[ARRAY_SIZE(qdma->irqmask)];
int i;
- for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) {
- intr[i] = airoha_qdma_rr(eth, REG_INT_STATUS(i));
- intr[i] &= eth->irqmask[i];
- airoha_qdma_wr(eth, REG_INT_STATUS(i), intr[i]);
+ for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
+ intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
+ intr[i] &= qdma->irqmask[i];
+ airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
}
- if (!test_bit(DEV_STATE_INITIALIZED, &eth->state))
+ if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
return IRQ_NONE;
if (intr[1] & RX_DONE_INT_MASK) {
- airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1,
+ airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
RX_DONE_INT_MASK);
- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
- if (!eth->q_rx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
continue;
if (intr[1] & BIT(i))
- napi_schedule(&eth->q_rx[i].napi);
+ napi_schedule(&qdma->q_rx[i].napi);
}
}
if (intr[0] & INT_TX_MASK) {
- for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
- struct airoha_tx_irq_queue *irq_q = &eth->q_tx_irq[i];
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+ struct airoha_tx_irq_queue *irq_q = &qdma->q_tx_irq[i];
u32 status, head;
if (!(intr[0] & TX_DONE_INT_MASK(i)))
continue;
- airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0,
+ airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(i));
- status = airoha_qdma_rr(eth, REG_IRQ_STATUS(i));
+ status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i));
head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
irq_q->head = head % irq_q->size;
irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
- napi_schedule(&eth->q_tx_irq[i].napi);
+ napi_schedule(&qdma->q_tx_irq[i].napi);
}
}
return IRQ_HANDLED;
}
-static int airoha_qdma_init(struct airoha_eth *eth)
+static int airoha_qdma_init(struct platform_device *pdev,
+ struct airoha_eth *eth,
+ struct airoha_qdma *qdma)
{
- int err;
+ int err, id = qdma - &eth->qdma[0];
+ const char *res;
- err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, eth);
- if (err)
- return err;
+ spin_lock_init(&qdma->irq_lock);
+ qdma->eth = eth;
+
+ res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
+ if (!res)
+ return -ENOMEM;
+
+ qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
+ if (IS_ERR(qdma->regs))
+ return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
+ "failed to iomap qdma%d regs\n", id);
+
+ qdma->irq = platform_get_irq(pdev, 4 * id);
+ if (qdma->irq < 0)
+ return qdma->irq;
- err = airoha_qdma_init_rx(eth);
+ err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, qdma);
if (err)
return err;
- err = airoha_qdma_init_tx(eth);
+ err = airoha_qdma_init_rx(qdma);
if (err)
return err;
- err = airoha_qdma_init_hfwd_queues(eth);
+ err = airoha_qdma_init_tx(qdma);
if (err)
return err;
- err = airoha_qdma_hw_init(eth);
+ err = airoha_qdma_init_hfwd_queues(qdma);
if (err)
return err;
- set_bit(DEV_STATE_INITIALIZED, &eth->state);
-
- return 0;
+ return airoha_qdma_hw_init(qdma);
}
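The "qdma%d" resource name above is built from an instance id obtained by pointer subtraction against the array base (id = qdma - &eth->qdma[0]), so each QDMA block maps onto its own register window and IRQ line. A minimal userspace sketch of the idiom, with illustrative names only:

    #include <stdio.h>

    struct qdma { int irq; };
    struct eth { struct qdma qdma[2]; };

    int main(void)
    {
        struct eth e;
        struct qdma *q = &e.qdma[1];
        int id = q - &e.qdma[0];    /* pointer subtraction yields the array index */
        char name[8];

        snprintf(name, sizeof(name), "qdma%d", id); /* mirrors the devm_kasprintf() call */
        printf("%s\n", name);                       /* prints "qdma1" */
        return 0;
    }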
-static int airoha_hw_init(struct airoha_eth *eth)
+static int airoha_hw_init(struct platform_device *pdev,
+ struct airoha_eth *eth)
{
- int err;
+ int err, i;
/* disable xsi */
- reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), eth->xsi_rsts);
+ err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts),
+ eth->xsi_rsts);
+ if (err)
+ return err;
+
+ err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
+ if (err)
+ return err;
- reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
- msleep(20);
- reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
msleep(20);
+ err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
+ if (err)
+ return err;
+ msleep(20);
err = airoha_fe_init(eth);
if (err)
return err;
- return airoha_qdma_init(eth);
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
+ err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
+ if (err)
+ return err;
+ }
+
+ set_bit(DEV_STATE_INITIALIZED, &eth->state);
+
+ return 0;
}
-static void airoha_hw_cleanup(struct airoha_eth *eth)
+static void airoha_hw_cleanup(struct airoha_qdma *qdma)
{
int i;
- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
- if (!eth->q_rx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
continue;
- napi_disable(&eth->q_rx[i].napi);
- netif_napi_del(&eth->q_rx[i].napi);
- airoha_qdma_cleanup_rx_queue(&eth->q_rx[i]);
- if (eth->q_rx[i].page_pool)
- page_pool_destroy(eth->q_rx[i].page_pool);
+ napi_disable(&qdma->q_rx[i].napi);
+ netif_napi_del(&qdma->q_rx[i].napi);
+ airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
+ if (qdma->q_rx[i].page_pool)
+ page_pool_destroy(qdma->q_rx[i].page_pool);
}
- for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
- napi_disable(&eth->q_tx_irq[i].napi);
- netif_napi_del(&eth->q_tx_irq[i].napi);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+ napi_disable(&qdma->q_tx_irq[i].napi);
+ netif_napi_del(&qdma->q_tx_irq[i].napi);
}
- for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
- if (!eth->q_tx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
continue;
- airoha_qdma_cleanup_tx_queue(&eth->q_tx[i]);
+ airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
}
}
-static void airoha_qdma_start_napi(struct airoha_eth *eth)
+static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
{
int i;
- for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++)
- napi_enable(&eth->q_tx_irq[i].napi);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+ napi_enable(&qdma->q_tx_irq[i].napi);
- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
- if (!eth->q_rx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
continue;
- napi_enable(&eth->q_rx[i].napi);
+ napi_enable(&qdma->q_rx[i].napi);
}
}
static void airoha_update_hw_stats(struct airoha_gdm_port *port)
{
- struct airoha_eth *eth = port->eth;
+ struct airoha_eth *eth = port->qdma->eth;
u32 val, i = 0;
spin_lock(&port->stats.lock);
@@ -2247,23 +2305,24 @@ static void airoha_update_hw_stats(struct airoha_gdm_port *port)
static int airoha_dev_open(struct net_device *dev)
{
struct airoha_gdm_port *port = netdev_priv(dev);
- struct airoha_eth *eth = port->eth;
+ struct airoha_qdma *qdma = port->qdma;
int err;
netif_tx_start_all_queues(dev);
- err = airoha_set_gdm_ports(eth, true);
+ err = airoha_set_gdm_ports(qdma->eth, true);
if (err)
return err;
if (netdev_uses_dsa(dev))
- airoha_fe_set(eth, REG_GDM_INGRESS_CFG(port->id),
+ airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
GDM_STAG_EN_MASK);
else
- airoha_fe_clear(eth, REG_GDM_INGRESS_CFG(port->id),
+ airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
GDM_STAG_EN_MASK);
- airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
- airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
+ airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
return 0;
}
@@ -2271,16 +2330,17 @@ static int airoha_dev_open(struct net_device *dev)
static int airoha_dev_stop(struct net_device *dev)
{
struct airoha_gdm_port *port = netdev_priv(dev);
- struct airoha_eth *eth = port->eth;
+ struct airoha_qdma *qdma = port->qdma;
int err;
netif_tx_disable(dev);
- err = airoha_set_gdm_ports(eth, false);
+ err = airoha_set_gdm_ports(qdma->eth, false);
if (err)
return err;
- airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
- airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
+ airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
return 0;
}
@@ -2294,7 +2354,7 @@ static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
if (err)
return err;
- airoha_set_macaddr(port->eth, dev->dev_addr);
+ airoha_set_macaddr(port, dev->dev_addr);
return 0;
}
@@ -2303,7 +2363,7 @@ static int airoha_dev_init(struct net_device *dev)
{
struct airoha_gdm_port *port = netdev_priv(dev);
- airoha_set_macaddr(port->eth, dev->dev_addr);
+ airoha_set_macaddr(port, dev->dev_addr);
return 0;
}
@@ -2337,7 +2397,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
struct airoha_gdm_port *port = netdev_priv(dev);
u32 msg0 = 0, msg1, len = skb_headlen(skb);
int i, qid = skb_get_queue_mapping(skb);
- struct airoha_eth *eth = port->eth;
+ struct airoha_qdma *qdma = port->qdma;
u32 nr_frags = 1 + sinfo->nr_frags;
struct netdev_queue *txq;
struct airoha_queue *q;
@@ -2367,7 +2427,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
- q = &eth->q_tx[qid];
+ q = &qdma->q_tx[qid];
if (WARN_ON_ONCE(!q->ndesc))
goto error;
@@ -2411,9 +2471,6 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
e->dma_addr = addr;
e->dma_len = len;
- airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
- FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
-
data = skb_frag_address(frag);
len = skb_frag_size(frag);
}
@@ -2422,6 +2479,11 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
q->queued += i;
skb_tx_timestamp(skb);
+ if (!netdev_xmit_more())
+ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
+ TX_RING_CPU_IDX_MASK,
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
+
if (q->ndesc - q->queued < q->free_thr)
netif_tx_stop_queue(txq);
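Moving the TX_RING_CPU_IDX write out of the fragment loop and behind netdev_xmit_more() batches the doorbell: descriptors for a burst are queued first and the hardware producer index is written once at the end of the burst. A standalone model of that pattern (ring size and names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int hw_cpu_idx;     /* stands in for the REG_TX_CPU_IDX doorbell */

    static void queue_pkt(unsigned int *head, bool xmit_more)
    {
        *head = (*head + 1) % 256;      /* advance the software ring head */
        if (!xmit_more)
            hw_cpu_idx = *head;         /* one register write per burst, not per packet */
    }

    int main(void)
    {
        unsigned int head = 0;

        queue_pkt(&head, true);         /* stack says more packets follow */
        queue_pkt(&head, true);
        queue_pkt(&head, false);        /* burst ends: ring the doorbell */
        printf("doorbell=%u head=%u\n", hw_cpu_idx, head);
        return 0;
    }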
@@ -2448,7 +2510,7 @@ static void airoha_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct airoha_gdm_port *port = netdev_priv(dev);
- struct airoha_eth *eth = port->eth;
+ struct airoha_eth *eth = port->qdma->eth;
strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
@@ -2529,6 +2591,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
{
const __be32 *id_ptr = of_get_property(np, "reg", NULL);
struct airoha_gdm_port *port;
+ struct airoha_qdma *qdma;
struct net_device *dev;
int err, index;
u32 id;
@@ -2558,6 +2621,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
return -ENOMEM;
}
+ qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
dev->netdev_ops = &airoha_netdev_ops;
dev->ethtool_ops = &airoha_ethtool_ops;
dev->max_mtu = AIROHA_MAX_MTU;
@@ -2567,6 +2631,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
NETIF_F_SG | NETIF_F_TSO;
dev->features |= dev->hw_features;
dev->dev.of_node = np;
+ dev->irq = qdma->irq;
SET_NETDEV_DEV(dev, eth->dev);
err = of_get_ethdev_address(np, dev);
@@ -2582,8 +2647,8 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
port = netdev_priv(dev);
u64_stats_init(&port->stats.syncp);
spin_lock_init(&port->stats.lock);
+ port->qdma = qdma;
port->dev = dev;
- port->eth = eth;
port->id = id;
eth->ports[index] = port;
@@ -2613,11 +2678,6 @@ static int airoha_probe(struct platform_device *pdev)
return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
"failed to iomap fe regs\n");
- eth->qdma_regs = devm_platform_ioremap_resource_byname(pdev, "qdma0");
- if (IS_ERR(eth->qdma_regs))
- return dev_err_probe(eth->dev, PTR_ERR(eth->qdma_regs),
- "failed to iomap qdma regs\n");
-
eth->rsts[0].id = "fe";
eth->rsts[1].id = "pdma";
eth->rsts[2].id = "qdma";
@@ -2642,11 +2702,6 @@ static int airoha_probe(struct platform_device *pdev)
return err;
}
- spin_lock_init(&eth->irq_lock);
- eth->irq = platform_get_irq(pdev, 0);
- if (eth->irq < 0)
- return eth->irq;
-
eth->napi_dev = alloc_netdev_dummy(0);
if (!eth->napi_dev)
return -ENOMEM;
@@ -2656,11 +2711,13 @@ static int airoha_probe(struct platform_device *pdev)
strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
platform_set_drvdata(pdev, eth);
- err = airoha_hw_init(eth);
+ err = airoha_hw_init(pdev, eth);
if (err)
goto error;
- airoha_qdma_start_napi(eth);
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_qdma_start_napi(&eth->qdma[i]);
+
for_each_child_of_node(pdev->dev.of_node, np) {
if (!of_device_is_compatible(np, "airoha,eth-mac"))
continue;
@@ -2678,7 +2735,9 @@ static int airoha_probe(struct platform_device *pdev)
return 0;
error:
- airoha_hw_cleanup(eth);
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_hw_cleanup(&eth->qdma[i]);
+
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
@@ -2696,7 +2755,9 @@ static void airoha_remove(struct platform_device *pdev)
struct airoha_eth *eth = platform_get_drvdata(pdev);
int i;
- airoha_hw_cleanup(eth);
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_hw_cleanup(&eth->qdma[i]);
+
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
@@ -2715,6 +2776,7 @@ static const struct of_device_id of_airoha_match[] = {
{ .compatible = "airoha,en7581-eth" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, of_airoha_match);
static struct platform_driver airoha_driver = {
.probe = airoha_probe,
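The added MODULE_DEVICE_TABLE(of, ...) exports the compatible strings as module aliases, which is what lets udev/modprobe autoload the module when a matching device-tree node appears. A self-contained skeleton of the pattern; the "acme,demo-eth" compatible and all names here are hypothetical:

    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
        return 0;
    }

    static const struct of_device_id demo_of_match[] = {
        { .compatible = "acme,demo-eth" },      /* hypothetical compatible */
        { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(of, demo_of_match);     /* emits the modalias used for autoload */

    static struct platform_driver demo_driver = {
        .probe = demo_probe,
        .driver = {
            .name = "demo-eth",
            .of_match_table = demo_of_match,
        },
    };
    module_platform_driver(demo_driver);

    MODULE_DESCRIPTION("MODULE_DEVICE_TABLE autoload sketch");
    MODULE_LICENSE("GPL");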
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 16ca427cf4c3..ed7313c10a05 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1171,7 +1171,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
- for (i = 0; i < cnt; i++) {
+ for (i = 0; i < len; i++) {
struct mtk_tx_dma_v2 *txd;
txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
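The cnt -> len change reads as an off-by-chunk fix: the scratch ring is initialised in MTK_FQ_DMA_LENGTH-sized chunks and the last chunk may be shorter, so the inner loop must be bounded by the per-chunk len, not the total cnt. A standalone sketch of that loop shape, with made-up sizes (the real bounds live in mtk_init_fq_dma()):

    #include <stdio.h>

    #define CHUNK_LEN 512               /* plays the role of MTK_FQ_DMA_LENGTH */

    int main(void)
    {
        int cnt = 2000, inited = 0;     /* not a multiple of CHUNK_LEN on purpose */
        int i, j;

        for (j = 0; j * CHUNK_LEN < cnt; j++) {
            int len = cnt - j * CHUNK_LEN;

            if (len > CHUNK_LEN)
                len = CHUNK_LEN;        /* final chunk: 464 entries, not 512 */
            for (i = 0; i < len; i++)   /* looping to cnt here would overrun */
                inited++;
        }
        printf("%d descriptors initialised\n", inited);  /* 2000 */
        return 0;
    }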
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index eb1708b43aa3..0d5225f1d3ee 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -724,12 +724,8 @@ enum mtk_clks_map {
MTK_CLK_ETHWARP_WOCPU2,
MTK_CLK_ETHWARP_WOCPU1,
MTK_CLK_ETHWARP_WOCPU0,
- MTK_CLK_TOP_USXGMII_SBUS_0_SEL,
- MTK_CLK_TOP_USXGMII_SBUS_1_SEL,
MTK_CLK_TOP_SGM_0_SEL,
MTK_CLK_TOP_SGM_1_SEL,
- MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL,
- MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL,
MTK_CLK_TOP_ETH_GMII_SEL,
MTK_CLK_TOP_ETH_REFCK_50M_SEL,
MTK_CLK_TOP_ETH_SYS_200M_SEL,
@@ -800,19 +796,9 @@ enum mtk_clks_map {
BIT_ULL(MTK_CLK_GP3) | BIT_ULL(MTK_CLK_XGP1) | \
BIT_ULL(MTK_CLK_XGP2) | BIT_ULL(MTK_CLK_XGP3) | \
BIT_ULL(MTK_CLK_CRYPTO) | \
- BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
- BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
- BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
- BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
BIT_ULL(MTK_CLK_ETHWARP_WOCPU2) | \
BIT_ULL(MTK_CLK_ETHWARP_WOCPU1) | \
BIT_ULL(MTK_CLK_ETHWARP_WOCPU0) | \
- BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_0_SEL) | \
- BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_1_SEL) | \
- BIT_ULL(MTK_CLK_TOP_SGM_0_SEL) | \
- BIT_ULL(MTK_CLK_TOP_SGM_1_SEL) | \
- BIT_ULL(MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL) | \
- BIT_ULL(MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_GMII_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_REFCK_50M_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_SYS_200M_SEL) | \
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 0acee405a749..ada852adc5f7 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -8,8 +8,11 @@
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
+
#include <net/dst_metadata.h>
#include <net/dsa.h>
+#include <net/ipv6.h>
+
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"
@@ -338,7 +341,6 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
{
int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
u32 *src, *dest;
- int i;
switch (type) {
case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
@@ -359,10 +361,8 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
return -EINVAL;
}
- for (i = 0; i < 4; i++)
- src[i] = be32_to_cpu(src_addr[i]);
- for (i = 0; i < 4; i++)
- dest[i] = be32_to_cpu(dest_addr[i]);
+ ipv6_addr_be32_to_cpu(src, src_addr);
+ ipv6_addr_be32_to_cpu(dest, dest_addr);
return 0;
}
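ipv6_addr_be32_to_cpu() and (in the debugfs hunk below) ipv6_addr_cpu_to_be32() replace the open-coded four-word loops. A userspace equivalent of the conversion; the helper here is a local stand-in, not the kernel's:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IPV6_ADDR_WORDS 4

    static void addr_be32_to_cpu(uint32_t *dst, const uint32_t *src)
    {
        int i;

        for (i = 0; i < IPV6_ADDR_WORDS; i++)
            dst[i] = ntohl(src[i]);     /* big-endian wire words to host order */
    }

    int main(void)
    {
        uint32_t be[IPV6_ADDR_WORDS] = { htonl(0x20010db8), 0, 0, htonl(1) };
        uint32_t cpu[IPV6_ADDR_WORDS];

        addr_be32_to_cpu(cpu, be);
        printf("%08x ... %08x\n", cpu[0], cpu[3]);  /* 20010db8 ... 00000001 */
        return 0;
    }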
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index 1a97feca77f2..570ebf91f693 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -3,6 +3,9 @@
#include <linux/kernel.h>
#include <linux/debugfs.h>
+
+#include <net/ipv6.h>
+
#include "mtk_eth_soc.h"
struct mtk_flow_addr_info
@@ -47,16 +50,14 @@ static const char *mtk_foe_pkt_type_str(int type)
static void
mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6)
{
- __be32 n_addr[4];
- int i;
+ __be32 n_addr[IPV6_ADDR_WORDS];
if (!ipv6) {
seq_printf(m, "%pI4h", addr);
return;
}
- for (i = 0; i < ARRAY_SIZE(n_addr); i++)
- n_addr[i] = htonl(addr[i]);
+ ipv6_addr_cpu_to_be32(n_addr, addr);
seq_printf(m, "%pI6", n_addr);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 61334a71058c..e212a4ba9275 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -2666,14 +2666,15 @@ mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_pri
{
struct mtk_wed_flow_block_priv *priv = cb_priv;
struct flow_cls_offload *cls = type_data;
- struct mtk_wed_hw *hw = priv->hw;
+ struct mtk_wed_hw *hw = NULL;
- if (!tc_can_offload(priv->dev))
+ if (!priv || !tc_can_offload(priv->dev))
return -EOPNOTSUPP;
if (type != TC_SETUP_CLSFLOWER)
return -EOPNOTSUPP;
+ hw = priv->hw;
return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
}
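This fix pairs with the hunk below that clears block_cb->cb_priv after kfree(): the callback now tolerates a NULL private pointer instead of dereferencing freed memory when a classifier call races block teardown. A toy model of the kfree-then-NULL plus NULL-check pattern:

    #include <stdio.h>
    #include <stdlib.h>

    struct cb_priv { int hw_index; };

    static int cb(const struct cb_priv *priv)
    {
        if (!priv)                  /* tolerate teardown racing the call */
            return -95;             /* -EOPNOTSUPP in the driver */
        return priv->hw_index;
    }

    int main(void)
    {
        struct cb_priv *p = calloc(1, sizeof(*p));

        cb(p);                      /* normal path */
        free(p);
        p = NULL;                   /* mirrors block_cb->cb_priv = NULL */
        printf("%d\n", cb(p));      /* safely rejected instead of use-after-free */
        return 0;
    }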
@@ -2729,6 +2730,7 @@ mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
kfree(block_cb->cb_priv);
+ block_cb->cb_priv = NULL;
}
return 0;
default:
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
index ea0884186d76..c06e5ad18b01 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -10,7 +10,7 @@
#include <linux/of_reserved_mem.h>
#include <linux/mfd/syscon.h>
#include <linux/soc/mediatek/mtk_wed.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "mtk_wed_regs.h"
#include "mtk_wed_wo.h"
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 943d6918c2ec..cd17a3f4faf8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -2036,20 +2036,20 @@ static int mlx4_en_get_module_info(struct net_device *dev,
switch (data[0] /* identifier */) {
case MLX4_MODULE_ID_QSFP:
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
break;
case MLX4_MODULE_ID_QSFP_PLUS:
if (data[1] >= 0x3) { /* revision id */
modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
}
break;
case MLX4_MODULE_ID_QSFP28:
modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
break;
case MLX4_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 685335832a93..ea6070180c96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -172,6 +172,16 @@ config MLX5_SW_STEERING
help
Build support for software-managed steering in the NIC.
+config MLX5_HW_STEERING
+ bool "Mellanox Technologies hardware-managed steering"
+ depends on MLX5_CORE_EN && MLX5_ESWITCH
+ default y
+ help
+ Build support for Hardware-Managed Flow Steering (HMFS) in the NIC.
+ HMFS is a new approach to managing steering rules where STEs are
+ written to ICM by HW (as opposed to SW in software-managed steering),
+	  which allows a higher rate of rule insertion.
+
config MLX5_SF
bool "Mellanox Technologies subfunction device support using auxiliary device"
depends on MLX5_CORE && MLX5_CORE_EN
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 1289475e7be7..5912f7e614f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -119,6 +119,27 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_action.o steering/fs_dr.o \
steering/dr_definer.o steering/dr_ptrn.o \
steering/dr_arg.o steering/dr_dbg.o lib/smfs.o
+
+#
+# HW Steering
+#
+mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/mlx5hws_cmd.o \
+ steering/hws/mlx5hws_context.o \
+ steering/hws/mlx5hws_pat_arg.o \
+ steering/hws/mlx5hws_buddy.o \
+ steering/hws/mlx5hws_pool.o \
+ steering/hws/mlx5hws_table.o \
+ steering/hws/mlx5hws_action.o \
+ steering/hws/mlx5hws_rule.o \
+ steering/hws/mlx5hws_matcher.o \
+ steering/hws/mlx5hws_send.o \
+ steering/hws/mlx5hws_definer.o \
+ steering/hws/mlx5hws_bwc.o \
+ steering/hws/mlx5hws_debug.o \
+ steering/hws/mlx5hws_vport.o \
+ steering/hws/mlx5hws_bwc_complex.o
+
+
#
# SF device
#
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 20768ef2e9d2..6bd8a18e3af3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -754,6 +754,8 @@ static const char *cmd_status_str(u8 status)
return "bad resource";
case MLX5_CMD_STAT_RES_BUSY:
return "resource busy";
+ case MLX5_CMD_STAT_NOT_READY:
+ return "FW not ready";
case MLX5_CMD_STAT_LIM_ERR:
return "limits exceeded";
case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
@@ -787,6 +789,7 @@ static int cmd_status_to_err(u8 status)
case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
+ case MLX5_CMD_STAT_NOT_READY: return -EAGAIN;
case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
@@ -815,14 +818,16 @@ EXPORT_SYMBOL(mlx5_cmd_out_err);
static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
{
u16 opcode, op_mod;
+ u8 status;
u16 uid;
opcode = in_to_opcode(in);
op_mod = MLX5_GET(mbox_in, in, op_mod);
uid = MLX5_GET(mbox_in, in, uid);
+ status = MLX5_GET(mbox_out, out, status);
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
- opcode != MLX5_CMD_OP_CREATE_UCTX)
+ opcode != MLX5_CMD_OP_CREATE_UCTX && status != MLX5_CMD_STAT_NOT_READY)
mlx5_cmd_out_err(dev, opcode, op_mod, out);
}
@@ -1760,6 +1765,10 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
}
}
+#define MLX5_MAX_MANAGE_PAGES_CMD_ENT 1
+#define MLX5_CMD_MASK ((1UL << (cmd->vars.max_reg_cmds + \
+ MLX5_MAX_MANAGE_PAGES_CMD_ENT)) - 1)
+
static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
@@ -1771,7 +1780,7 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
/* wait for pending handlers to complete */
mlx5_eq_synchronize_cmd_irq(dev);
spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
- vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1);
+ vector = ~dev->cmd.vars.bitmask & MLX5_CMD_MASK;
if (!vector)
goto no_trig;
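MLX5_CMD_MASK widens the completion bitmask from the regular command slots to also cover the single entry reserved for MANAGE_PAGES, so forced completions no longer miss a command stuck in that slot. The arithmetic, as a standalone check (the log_sz value is illustrative):

    #include <stdio.h>

    #define MAX_MANAGE_PAGES_CMD_ENT 1

    int main(void)
    {
        unsigned int log_sz = 5;                            /* 32 command entries */
        unsigned long max_reg_cmds = (1UL << log_sz) - 1;   /* 31 regular slots */
        unsigned long old_mask = (1UL << max_reg_cmds) - 1; /* missed the pages slot */
        unsigned long new_mask =
            (1UL << (max_reg_cmds + MAX_MANAGE_PAGES_CMD_ENT)) - 1;

        printf("old=%#lx new=%#lx\n", old_mask, new_mask);  /* 0x7fffffff vs 0xffffffff */
        return 0;
    }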
@@ -1882,10 +1891,12 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
if (throttle_op) {
- /* atomic context may not sleep */
- if (callback)
- return -EINVAL;
- down(&dev->cmd.vars.throttle_sem);
+ if (callback) {
+ if (down_trylock(&dev->cmd.vars.throttle_sem))
+ return -EBUSY;
+ } else {
+ down(&dev->cmd.vars.throttle_sem);
+ }
}
pages_queue = is_manage_pages(in);
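For throttled opcodes the async path can no longer simply be rejected: it now trylocks the throttle semaphore (atomic context must not sleep) and returns -EBUSY when no slot is free, while the sync path still blocks; the matching up() happens in the completion handler further down. A userspace model with POSIX semaphores:

    #include <errno.h>
    #include <semaphore.h>
    #include <stdbool.h>
    #include <stdio.h>

    static sem_t throttle;

    static int submit(bool async)
    {
        if (async) {
            if (sem_trywait(&throttle))  /* must not sleep: fail fast */
                return -EBUSY;
        } else {
            sem_wait(&throttle);         /* process context: may block */
        }
        /* ... issue the command; async completion releases the slot ... */
        return 0;
    }

    int main(void)
    {
        sem_init(&throttle, 0, 1);       /* one throttled command in flight */
        printf("%d\n", submit(true));    /* 0 */
        printf("%d\n", submit(true));    /* -16 (-EBUSY): slot still held */
        return 0;
    }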
@@ -2091,10 +2102,19 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
struct mlx5_async_work *work = _work;
struct mlx5_async_ctx *ctx;
+ struct mlx5_core_dev *dev;
+ u16 opcode;
ctx = work->ctx;
- status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out);
+ dev = ctx->dev;
+ opcode = work->opcode;
+ status = cmd_status_err(dev, status, work->opcode, work->op_mod, work->out);
work->user_callback(status, work);
+ /* Can't access "work" from this point on. It could have been freed in
+ * the callback.
+ */
+ if (mlx5_cmd_is_throttle_opcode(opcode))
+ up(&dev->cmd.vars.throttle_sem);
if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
}
@@ -2345,7 +2365,7 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
cmd->state = MLX5_CMDIF_STATE_DOWN;
cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1;
- cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1;
+ cmd->vars.bitmask = MLX5_CMD_MASK;
sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
sema_init(&cmd->vars.pages_sem, 1);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index ddf1b87f1bc0..9aed29fa4900 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -203,10 +203,10 @@ TRACE_EVENT(mlx5_fs_set_fte,
fs_get_obj(__entry->fg, fte->node.parent);
__entry->group_index = __entry->fg->id;
__entry->index = fte->index;
- __entry->action = fte->action.action;
+ __entry->action = fte->act_dests.action.action;
__entry->mask_enable = __entry->fg->mask.match_criteria_enable;
- __entry->flow_tag = fte->flow_context.flow_tag;
- __entry->flow_source = fte->flow_context.flow_source;
+ __entry->flow_tag = fte->act_dests.flow_context.flow_tag;
+ __entry->flow_source = fte->act_dests.flow_context.flow_source;
memcpy(__entry->mask_outer,
MLX5_ADDR_OF(fte_match_param,
&__entry->fg->mask.match_criteria,
@@ -284,7 +284,7 @@ TRACE_EVENT(mlx5_fs_add_rule,
TP_fast_assign(
__entry->rule = rule;
fs_get_obj(__entry->fte, rule->node.parent);
- __entry->index = __entry->fte->dests_size - 1;
+ __entry->index = __entry->fte->act_dests.dests_size - 1;
__entry->sw_action = rule->sw_action;
memcpy(__entry->destination,
&rule->dest_attr,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 5fd82c67b6ab..57b7298a0e79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -130,7 +130,7 @@ struct page_pool;
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
-#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
+#define MLX5E_DEFAULT_SHAMPO_TIMEOUT 1024
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
@@ -627,7 +627,7 @@ struct mlx5e_shampo_hd {
struct mlx5e_dma_info *info;
struct mlx5e_frag_page *pages;
u16 curr_page_index;
- u16 hd_per_wq;
+ u32 hd_per_wq;
u16 hd_per_wqe;
unsigned long *bitmap;
u16 pi;
@@ -998,6 +998,7 @@ void mlx5e_build_ptys2ethtool_map(void);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode);
+void mlx5e_shampo_fill_umr(struct mlx5e_rq *rq, int len);
void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
@@ -1172,14 +1173,16 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
- struct ethtool_ringparam *param);
+ struct ethtool_ringparam *param,
+ struct netlink_ext_ack *extack);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal,
- struct kernel_ethtool_coalesce *kernel_coal);
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 6c9ccccca81e..64b62ed17b07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -928,7 +928,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, log_headers_entry_size,
mlx5e_shampo_get_log_hd_entry_size(mdev, params));
MLX5_SET(rqc, rqc, reservation_timeout,
- params->packet_merge.timeout);
+ mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_SHAMPO_TIMEOUT));
MLX5_SET(rqc, rqc, shampo_match_criteria_type,
params->packet_merge.shampo.match_criteria_type);
MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
@@ -1087,6 +1087,20 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
return wqebbs;
}
+#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
+
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
+{
+ int i;
+
+ /* The supported periods are organized in ascending order */
+ for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
+ if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
+ break;
+
+ return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
+}
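The relocated helper scans the capability array, which is in ascending order, and returns the first supported period that is at least the wanted timeout, falling back to the largest entry. The same selection logic with made-up periods (the real values come from the MLX5_CAP_ETH lro_timer_supported_periods capabilities):

    #include <stdio.h>

    static const unsigned int periods[4] = { 8, 16, 32, 1024 };  /* illustrative caps */

    static unsigned int choose_timeout(unsigned int wanted)
    {
        int i;

        for (i = 0; i < 3; i++)          /* stop at the last entry: it is the clamp */
            if (periods[i] >= wanted)
                break;
        return periods[i];
    }

    int main(void)
    {
        printf("%u %u %u\n",
               choose_timeout(10),       /* 16: first period >= 10 */
               choose_timeout(1024),     /* 1024: exact match */
               choose_timeout(4096));    /* 1024: clamped to the maximum */
        return 0;
    }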
+
static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 749b2ec0436e..3f8986f9d862 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -108,6 +108,7 @@ u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param);
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 22918b2ef7f1..09433b91be17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -146,7 +146,9 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
return err;
}
+ mutex_lock(&priv->state_lock);
err = mlx5e_safe_reopen_channels(priv);
+ mutex_unlock(&priv->state_lock);
if (!err) {
to_ctx->status = 1; /* all channels recovered */
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
index bb6b1a979ba1..62b3f7ff5562 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
@@ -25,6 +25,8 @@ struct mlx5_ct_fs_ops {
struct mlx5_flow_attr *attr,
struct flow_rule *flow_rule);
void (*ct_rule_del)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule);
+ int (*ct_rule_update)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr);
size_t priv_size;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
index ae4f55be48ce..64a82aafaaca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
@@ -65,9 +65,30 @@ mlx5_ct_fs_dmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_ru
kfree(dmfs_rule);
}
+static int mlx5_ct_fs_dmfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_ct_fs_dmfs_rule *dmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_dmfs_rule,
+ fs_rule);
+ struct mlx5e_priv *priv = netdev_priv(fs->netdev);
+ struct mlx5_flow_handle *rule;
+
+ rule = mlx5_tc_rule_insert(priv, spec, attr);
+ if (IS_ERR(rule))
+ return PTR_ERR(rule);
+ mlx5_tc_rule_delete(priv, dmfs_rule->rule, dmfs_rule->attr);
+
+ dmfs_rule->rule = rule;
+ dmfs_rule->attr = attr;
+
+ return 0;
+}
+
static struct mlx5_ct_fs_ops dmfs_ops = {
.ct_rule_add = mlx5_ct_fs_dmfs_ct_rule_add,
.ct_rule_del = mlx5_ct_fs_dmfs_ct_rule_del,
+ .ct_rule_update = mlx5_ct_fs_dmfs_ct_rule_update,
.init = mlx5_ct_fs_dmfs_init,
.destroy = mlx5_ct_fs_dmfs_destroy,
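ct_rule_update follows a make-before-break order: the replacement rule is inserted first, and the old rule is deleted only once the new one exists, so the flow keeps a valid steering entry throughout, and an insertion failure leaves the old rule untouched. The shape of that update, modelled standalone:

    #include <stdio.h>
    #include <stdlib.h>

    struct rule { int id; };

    static struct rule *hw_insert(int id)
    {
        struct rule *r = malloc(sizeof(*r));

        if (r)
            r->id = id;
        return r;
    }

    static int update(struct rule **cur, int new_id)
    {
        struct rule *nr = hw_insert(new_id);  /* add the replacement first */

        if (!nr)
            return -1;                        /* old rule keeps forwarding */
        free(*cur);                           /* only then drop the old one */
        *cur = nr;
        return 0;
    }

    int main(void)
    {
        struct rule *r = hw_insert(1);

        update(&r, 2);
        printf("active rule %d\n", r->id);    /* active rule 2 */
        free(r);
        return 0;
    }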
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
index 8c531f4ec912..1c062a2e8996 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -368,9 +368,35 @@ mlx5_ct_fs_smfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_ru
kfree(smfs_rule);
}
+static int mlx5_ct_fs_smfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_ct_fs_smfs_rule *smfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_smfs_rule,
+ fs_rule);
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+ struct mlx5dr_action *actions[3]; /* We only need to create 3 actions, see below. */
+ struct mlx5dr_rule *rule;
+
+ actions[0] = smfs_rule->count_action;
+ actions[1] = attr->modify_hdr->action.dr_action;
+ actions[2] = fs_smfs->fwd_action;
+
+ rule = mlx5_smfs_rule_create(smfs_rule->smfs_matcher->dr_matcher, spec,
+ ARRAY_SIZE(actions), actions, spec->flow_context.flow_source);
+ if (!rule)
+ return -EINVAL;
+
+ mlx5_smfs_rule_destroy(smfs_rule->rule);
+ smfs_rule->rule = rule;
+
+ return 0;
+}
+
static struct mlx5_ct_fs_ops fs_smfs_ops = {
.ct_rule_add = mlx5_ct_fs_smfs_ct_rule_add,
.ct_rule_del = mlx5_ct_fs_smfs_ct_rule_del,
+ .ct_rule_update = mlx5_ct_fs_smfs_ct_rule_update,
.init = mlx5_ct_fs_smfs_init,
.destroy = mlx5_ct_fs_smfs_destroy,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 71a168746ebe..dcfccaaa8d91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -876,15 +876,14 @@ err_attr:
}
static int
-mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
- struct flow_rule *flow_rule,
- struct mlx5_ct_entry *entry,
- bool nat, u8 zone_restore_id)
+mlx5_tc_ct_entry_update_rule(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry,
+ bool nat, u8 zone_restore_id)
{
struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
struct mlx5_flow_attr *attr = zone_rule->attr, *old_attr;
struct mlx5e_mod_hdr_handle *mh;
- struct mlx5_ct_fs_rule *rule;
struct mlx5_flow_spec *spec;
int err;
@@ -902,29 +901,26 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id,
nat, mlx5_tc_ct_entry_in_ct_nat_table(entry));
if (err) {
- ct_dbg("Failed to create ct entry mod hdr");
+ ct_dbg("Failed to create ct entry mod hdr, err: %d", err);
goto err_mod_hdr;
}
mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
- rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- ct_dbg("Failed to add replacement ct entry rule, nat: %d", nat);
+ err = ct_priv->fs_ops->ct_rule_update(ct_priv->fs, zone_rule->rule, spec, attr);
+ if (err) {
+ ct_dbg("Failed to update ct entry rule, nat: %d, err: %d", nat, err);
goto err_rule;
}
- ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
- zone_rule->rule = rule;
mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, old_attr, zone_rule->mh);
zone_rule->mh = mh;
mlx5_put_label_mapping(ct_priv, old_attr->ct_attr.ct_labels_id);
kfree(old_attr);
kvfree(spec);
- ct_dbg("Replaced ct entry rule in zone %d", entry->tuple.zone);
+ ct_dbg("Updated ct entry rule in zone %d", entry->tuple.zone);
return 0;
@@ -1141,23 +1137,23 @@ err_orig:
}
static int
-mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
- struct flow_rule *flow_rule,
- struct mlx5_ct_entry *entry,
- u8 zone_restore_id)
+mlx5_tc_ct_entry_update_rules(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry,
+ u8 zone_restore_id)
{
int err = 0;
if (mlx5_tc_ct_entry_in_ct_table(entry)) {
- err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
- zone_restore_id);
+ err = mlx5_tc_ct_entry_update_rule(ct_priv, flow_rule, entry, false,
+ zone_restore_id);
if (err)
return err;
}
if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
- err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
- zone_restore_id);
+ err = mlx5_tc_ct_entry_update_rule(ct_priv, flow_rule, entry, true,
+ zone_restore_id);
if (err && mlx5_tc_ct_entry_in_ct_table(entry))
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
}
@@ -1165,13 +1161,13 @@ mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
}
static int
-mlx5_tc_ct_block_flow_offload_replace(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
- struct mlx5_ct_entry *entry, unsigned long cookie)
+mlx5_tc_ct_block_flow_offload_update(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry, unsigned long cookie)
{
struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
int err;
- err = mlx5_tc_ct_entry_replace_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
+ err = mlx5_tc_ct_entry_update_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
if (!err)
return 0;
@@ -1216,7 +1212,7 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
entry->restore_cookie = meta_action->ct_metadata.cookie;
spin_unlock_bh(&ct_priv->ht_lock);
- err = mlx5_tc_ct_block_flow_offload_replace(ft, flow_rule, entry, cookie);
+ err = mlx5_tc_ct_block_flow_offload_update(ft, flow_rule, entry, cookie);
mlx5_tc_ct_entry_put(entry);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 6cc23af66b5b..efb34de4cb7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -109,6 +109,7 @@ struct mlx5e_tc_flow {
struct completion init_done;
struct completion del_hw_done;
struct mlx5_flow_attr *attr;
+ struct mlx5_flow_attr *extra_split_attr;
struct list_head attrs;
u32 chain_mapping;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
index d4239e3b3c88..11f724ad90db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
@@ -23,6 +23,9 @@ struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify)
struct mlx5e_tir_builder *builder;
builder = kvzalloc(sizeof(*builder), GFP_KERNEL);
+ if (!builder)
+ return NULL;
+
builder->modify = modify;
return builder;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 3d274599015b..ca92e518be76 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -67,7 +67,6 @@ static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work)
return;
spin_lock_bh(&x->lock);
- xfrm_state_check_expire(x);
if (x->km.state == XFRM_STATE_EXPIRED) {
sa_entry->attrs.drop = true;
spin_unlock_bh(&x->lock);
@@ -75,6 +74,13 @@ static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work)
mlx5e_accel_ipsec_fs_modify(sa_entry);
return;
}
+
+ if (x->km.state != XFRM_STATE_VALID) {
+ spin_unlock_bh(&x->lock);
+ return;
+ }
+
+ xfrm_state_check_expire(x);
spin_unlock_bh(&x->lock);
queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 797db853de36..53cfa39188cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -127,6 +127,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
attrs->lft.hard_packet_limit);
MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
+ MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
}
if (attrs->lft.soft_packet_limit != XFRM_INF) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 36845872ae94..1966736f98b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -83,17 +83,15 @@ struct ptys2ethtool_config ptys2ext_ethtool_table[MLX5E_EXT_LINK_MODES_NUMBER];
({ \
struct ptys2ethtool_config *cfg; \
const unsigned int modes[] = { __VA_ARGS__ }; \
- unsigned int i, bit, idx; \
+ unsigned int i; \
cfg = &ptys2##table##_ethtool_table[reg_]; \
bitmap_zero(cfg->supported, \
__ETHTOOL_LINK_MODE_MASK_NBITS); \
bitmap_zero(cfg->advertised, \
__ETHTOOL_LINK_MODE_MASK_NBITS); \
for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \
- bit = modes[i] % 64; \
- idx = modes[i] / 64; \
- __set_bit(bit, &cfg->supported[idx]); \
- __set_bit(bit, &cfg->advertised[idx]); \
+ bitmap_set(cfg->supported, modes[i], 1); \
+ bitmap_set(cfg->advertised, modes[i], 1); \
} \
})
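bitmap_set(map, bit, 1) subsumes the open-coded modes[i] % 64 and modes[i] / 64 split, and stays correct regardless of the underlying word size. A minimal stand-in showing what the call does:

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* minimal model of the kernel's bitmap_set(map, start, 1) */
    static void bitmap_set_one(unsigned long *map, unsigned int start)
    {
        map[start / BITS_PER_LONG] |= 1UL << (start % BITS_PER_LONG);
    }

    int main(void)
    {
        unsigned long map[2] = { 0, 0 };

        bitmap_set_one(map, 70);                /* word 1, bit 6 on 64-bit longs */
        printf("%lx %lx\n", map[0], map[1]);    /* 0 40 */
        return 0;
    }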
@@ -139,6 +137,10 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, legacy,
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100BASE_TX, legacy,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_T, legacy,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, legacy,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, legacy,
@@ -204,6 +206,12 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_8_400GBASE_CR8, ext,
+ ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GAUI_1_100GBASE_CR_KR, ext,
ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
@@ -354,35 +362,25 @@ static void mlx5e_get_ringparam(struct net_device *dev,
}
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_params new_params;
u8 log_rq_size;
u8 log_sq_size;
int err = 0;
- if (param->rx_jumbo_pending) {
- netdev_info(priv->netdev, "%s: rx_jumbo_pending not supported\n",
- __func__);
- return -EINVAL;
- }
- if (param->rx_mini_pending) {
- netdev_info(priv->netdev, "%s: rx_mini_pending not supported\n",
- __func__);
- return -EINVAL;
- }
-
if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
- netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%d)\n",
- __func__, param->rx_pending,
- 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "rx (%d) < min (%d)",
+ param->rx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
return -EINVAL;
}
if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
- netdev_info(priv->netdev, "%s: tx_pending (%d) < min (%d)\n",
- __func__, param->tx_pending,
- 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "tx (%d) < min (%d)",
+ param->tx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
return -EINVAL;
}
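The conversion from netdev_info() to extack means validation failures are delivered to the ethtool netlink caller instead of only landing in dmesg. A sketch of the reporting shape under that API, not mlx5 code; the function names and the 64-descriptor minimum are illustrative:

    #include <linux/ethtool.h>
    #include <linux/netlink.h>

    static int demo_set_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *param,
                                  struct kernel_ethtool_ringparam *kparam,
                                  struct netlink_ext_ack *extack)
    {
        if (param->rx_pending < 64) {
            /* shows up directly in the `ethtool -G` error output */
            NL_SET_ERR_MSG_FMT_MOD(extack, "rx (%d) < min (%d)",
                                   param->rx_pending, 64);
            return -EINVAL;
        }
        return 0;
    }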
@@ -418,7 +416,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- return mlx5e_ethtool_set_ringparam(priv, param);
+ return mlx5e_ethtool_set_ringparam(priv, param, extack);
}
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
@@ -445,7 +443,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
unsigned int count = ch->combined_count;
struct mlx5e_params new_params;
bool arfs_enabled;
- int rss_cnt;
bool opened;
int err = 0;
@@ -499,17 +496,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
goto out;
}
- /* Don't allow changing the number of channels if non-default RSS contexts exist,
- * the kernel doesn't protect against set_channels operations that break them.
- */
- rss_cnt = mlx5e_rx_res_rss_cnt(priv->rx_res) - 1;
- if (rss_cnt) {
- err = -EINVAL;
- netdev_err(priv->netdev, "%s: Non-default RSS contexts exist (%d), cannot change the number of channels\n",
- __func__, rss_cnt);
- goto out;
- }
-
/* Don't allow changing the number of channels if MQPRIO mode channel offload is active,
* because it defines a partition over the channels queues.
*/
@@ -557,12 +543,15 @@ static int mlx5e_set_channels(struct net_device *dev,
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal,
- struct kernel_ethtool_coalesce *kernel_coal)
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
struct dim_cq_moder *rx_moder, *tx_moder;
- if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
+ if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) {
+ NL_SET_ERR_MSG_MOD(extack, "CQ moderation not supported");
return -EOPNOTSUPP;
+ }
rx_moder = &priv->channels.params.rx_cq_moderation;
coal->rx_coalesce_usecs = rx_moder->usec;
@@ -586,7 +575,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack);
}
static int mlx5e_ethtool_get_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue,
@@ -708,26 +697,34 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
int err = 0;
if (!MLX5_CAP_GEN(mdev, cq_moderation) ||
- !MLX5_CAP_GEN(mdev, cq_period_mode_modify))
+ !MLX5_CAP_GEN(mdev, cq_period_mode_modify)) {
+ NL_SET_ERR_MSG_MOD(extack, "CQ moderation not supported");
return -EOPNOTSUPP;
+ }
if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME ||
coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) {
- netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n",
- __func__, MLX5E_MAX_COAL_TIME);
+ NL_SET_ERR_MSG_FMT_MOD(
+ extack,
+ "Max coalesce time %lu usecs, tx-usecs (%u) rx-usecs (%u)",
+ MLX5E_MAX_COAL_TIME, coal->tx_coalesce_usecs,
+ coal->rx_coalesce_usecs);
return -ERANGE;
}
if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES ||
coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) {
- netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n",
- __func__, MLX5E_MAX_COAL_FRAMES);
+ NL_SET_ERR_MSG_FMT_MOD(
+ extack,
+ "Max coalesce frames %lu, tx-frames (%u) rx-frames (%u)",
+ MLX5E_MAX_COAL_FRAMES, coal->tx_max_coalesced_frames,
+ coal->rx_max_coalesced_frames);
return -ERANGE;
}
if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
!MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) {
- NL_SET_ERR_MSG_MOD(extack, "cqe_mode_rx/tx is not supported on this device");
+ NL_SET_ERR_MSG_MOD(extack, "cqe-mode-rx/tx is not supported on this device");
return -EOPNOTSUPP;
}
@@ -1299,7 +1296,8 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
u32 i, ptys_modes = 0;
for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
- if (*ptys2legacy_ethtool_table[i].advertised == 0)
+ if (bitmap_empty(ptys2legacy_ethtool_table[i].advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
continue;
if (bitmap_intersects(ptys2legacy_ethtool_table[i].advertised,
link_modes,
@@ -1313,18 +1311,18 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
- unsigned long modes[2];
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
for (i = 0; i < MLX5E_EXT_LINK_MODES_NUMBER; ++i) {
- if (ptys2ext_ethtool_table[i].advertised[0] == 0 &&
- ptys2ext_ethtool_table[i].advertised[1] == 0)
+ if (bitmap_empty(ptys2ext_ethtool_table[i].advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
continue;
- memset(modes, 0, sizeof(modes));
+ bitmap_zero(modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(modes, ptys2ext_ethtool_table[i].advertised,
link_modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
- if (modes[0] == ptys2ext_ethtool_table[i].advertised[0] &&
- modes[1] == ptys2ext_ethtool_table[i].advertised[1])
+ if (bitmap_equal(modes, ptys2ext_ethtool_table[i].advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
ptys_modes |= MLX5E_PROT_MASK(i);
}
return ptys_modes;
@@ -2015,8 +2013,10 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
if (size_read == -EINVAL)
return -EINVAL;
if (size_read < 0) {
- netdev_err(priv->netdev, "%s: mlx5_query_module_eeprom_by_page failed:0x%x\n",
- __func__, size_read);
+		NL_SET_ERR_MSG_FMT_MOD(
+			extack,
+			"Query module eeprom by page failed, read %u bytes, err %d",
+			i, size_read);
return i;
}
@@ -2605,6 +2605,7 @@ static void mlx5e_get_ts_stats(struct net_device *netdev,
const struct ethtool_ops mlx5e_ethtool_ops = {
.cap_rss_ctx_supported = true,
+ .rxfh_per_ctx_key = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 3eccdadc0357..773624bb2c5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -734,7 +734,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
if (num_tuples <= 0) {
netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
__func__, num_tuples);
- return num_tuples;
+ return num_tuples < 0 ? num_tuples : -EINVAL;
}
eth_ft = get_flow_table(priv, fs, num_tuples);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6f686fabed44..e601324a690a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1016,30 +1016,31 @@ err_rq_xdp_prog:
static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
- struct bpf_prog *old_prog;
-
- if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
- old_prog = rcu_dereference_protected(rq->xdp_prog,
- lockdep_is_held(&rq->priv->state_lock));
- if (old_prog)
- bpf_prog_put(old_prog);
- }
+ kvfree(rq->dim);
+ page_pool_destroy(rq->page_pool);
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ mlx5e_rq_free_shampo(rq);
kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
mlx5e_free_mpwqe_rq_drop_page(rq);
- mlx5e_rq_free_shampo(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
mlx5e_free_wqe_alloc_info(rq);
}
- kvfree(rq->dim);
- xdp_rxq_info_unreg(&rq->xdp_rxq);
- page_pool_destroy(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
+
+ if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
+ struct bpf_prog *old_prog;
+
+ old_prog = rcu_dereference_protected(rq->xdp_prog,
+ lockdep_is_held(&rq->priv->state_lock));
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
}
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter)
@@ -1236,6 +1237,14 @@ void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq)
rq->mpwqe.actual_wq_head = wq->head;
rq->mpwqe.umr_in_progress = 0;
rq->mpwqe.umr_completed = 0;
+
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ u16 len;
+
+ len = (shampo->pi - shampo->ci) & shampo->hd_per_wq;
+ mlx5e_shampo_fill_umr(rq, len);
+ }
}
void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
@@ -3020,15 +3029,18 @@ int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
struct mlx5e_params *params)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- int num_comp_vectors, ix, irq;
-
- num_comp_vectors = mlx5_comp_vectors_max(mdev);
+ int ix;
for (ix = 0; ix < params->num_channels; ix++) {
+ int num_comp_vectors, irq, vec_ix;
+ struct mlx5_core_dev *mdev;
+
+ mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix);
+ num_comp_vectors = mlx5_comp_vectors_max(mdev);
cpumask_clear(priv->scratchpad.cpumask);
+ vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix);
- for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
+ for (irq = vec_ix; irq < num_comp_vectors; irq += params->num_channels) {
int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
@@ -4403,9 +4415,9 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
if (mlx5e_is_uplink_rep(priv)) {
features = mlx5e_fix_uplink_rep_features(netdev, features);
- features |= NETIF_F_NETNS_LOCAL;
+ netdev->netns_local = true;
} else {
- features &= ~NETIF_F_NETNS_LOCAL;
+ netdev->netns_local = false;
}
mutex_unlock(&priv->state_lock);
@@ -5167,18 +5179,6 @@ const struct net_device_ops mlx5e_netdev_ops = {
#endif
};
-static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
-{
- int i;
-
- /* The supported periods are organized in ascending order */
- for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
- if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
- break;
-
- return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
-}
-
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu)
{
struct mlx5e_params *params = &priv->channels.params;
@@ -5308,7 +5308,7 @@ static void mlx5e_get_queue_stats_rx(struct net_device *dev, int i,
struct mlx5e_rq_stats *rq_stats;
ASSERT_RTNL();
- if (mlx5e_is_uplink_rep(priv))
+ if (mlx5e_is_uplink_rep(priv) || !priv->stats_nch)
return;
channel_stats = priv->channel_stats[i];
@@ -5328,6 +5328,9 @@ static void mlx5e_get_queue_stats_tx(struct net_device *dev, int i,
struct mlx5e_sq_stats *sq_stats;
ASSERT_RTNL();
+ if (!priv->stats_nch)
+ return;
+
/* no special case needed for ptp htb etc since txq2sq_stats is kept up
* to date for active sq_stats, otherwise get_base_stats takes care of
* inactive sqs.
@@ -6506,7 +6509,9 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
_mlx5e_suspend(adev, false);
- priv->profile->cleanup(priv);
+ /* Avoid cleanup if profile rollback failed. */
+ if (priv->profile)
+ priv->profile->cleanup(priv);
mlx5e_destroy_netdev(priv);
mlx5e_devlink_port_unregister(mlx5e_dev);
mlx5e_destroy_devlink(mlx5e_dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 8790d57dc6db..92094bf60d59 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -360,7 +360,7 @@ mlx5e_rep_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- return mlx5e_ethtool_set_ringparam(priv, param);
+ return mlx5e_ethtool_set_ringparam(priv, param, extack);
}
static void mlx5e_rep_get_channels(struct net_device *dev,
@@ -386,7 +386,7 @@ static int mlx5e_rep_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack);
}
static int mlx5e_rep_set_coalesce(struct net_device *netdev,
@@ -898,7 +898,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->hw_features |= NETIF_F_RXCSUM;
netdev->features |= netdev->hw_features;
- netdev->features |= NETIF_F_NETNS_LOCAL;
+
+ netdev->netns_local = true;
}
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 225da8d691fc..8e24ba96c779 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -735,6 +735,7 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
ksm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
+ ksm_entries = ALIGN_DOWN(ksm_entries, MLX5E_SHAMPO_WQ_HEADER_PER_PAGE);
if (!ksm_entries)
return 0;
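Note: ALIGN_DOWN() is the stock kernel macro; for a power-of-two unit such as the headers-per-page count it reduces to clearing the low bits (macro name below is invented for illustration):
/* ALIGN_DOWN_POW2(37, 8) == 32: round x down to a multiple of 2^n `a`. */
#define ALIGN_DOWN_POW2(x, a)	((x) & ~((a) - 1))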
@@ -962,26 +963,31 @@ void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
sq->cc = sqcc;
}
-static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
- struct mlx5e_icosq *sq)
+void mlx5e_shampo_fill_umr(struct mlx5e_rq *rq, int len)
{
- struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
- struct mlx5e_shampo_hd *shampo;
- /* assume 1:1 relationship between RQ and icosq */
- struct mlx5e_rq *rq = &c->rq;
- int end, from, len = umr.len;
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ int end, from, full_len = len;
- shampo = rq->mpwqe.shampo;
end = shampo->hd_per_wq;
from = shampo->ci;
- if (from + len > shampo->hd_per_wq) {
+ if (from + len > end) {
len -= end - from;
bitmap_set(shampo->bitmap, from, end - from);
from = 0;
}
bitmap_set(shampo->bitmap, from, len);
- shampo->ci = (shampo->ci + umr.len) & (shampo->hd_per_wq - 1);
+ shampo->ci = (shampo->ci + full_len) & (shampo->hd_per_wq - 1);
+}
+
+static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
+ struct mlx5e_icosq *sq)
+{
+ struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
+ /* assume 1:1 relationship between RQ and icosq */
+ struct mlx5e_rq *rq = &c->rq;
+
+ mlx5e_shampo_fill_umr(rq, umr.len);
}
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
@@ -2340,6 +2346,9 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
stats->hds_nodata_packets++;
stats->hds_nodata_bytes += head_size;
}
+ } else {
+ stats->hds_nosplit_packets++;
+ stats->hds_nosplit_bytes += data_bcnt;
}
mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
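Note: a self-contained model of the wrap-around bitmap fill that the new mlx5e_shampo_fill_umr() performs, split into two segments when the run crosses the end of the ring (a plain bool array stands in for the kernel bitmap API):
#include <stdbool.h>
/* Set `len` bits starting at `from` in a ring of `size` bits. */
static void ring_bitmap_set(bool *bits, int size, int from, int len)
{
	int i;

	if (from + len > size) {
		for (i = from; i < size; i++)
			bits[i] = true;
		len -= size - from;
		from = 0;
	}
	for (i = from; i < from + len; i++)
		bits[i] = true;
}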
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index e7a3290a708a..611ec4b6f370 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -144,6 +144,8 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
@@ -347,6 +349,8 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_gro_large_hds += rq_stats->gro_large_hds;
s->rx_hds_nodata_packets += rq_stats->hds_nodata_packets;
s->rx_hds_nodata_bytes += rq_stats->hds_nodata_bytes;
+ s->rx_hds_nosplit_packets += rq_stats->hds_nosplit_packets;
+ s->rx_hds_nosplit_bytes += rq_stats->hds_nosplit_bytes;
s->rx_ecn_mark += rq_stats->ecn_mark;
s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
@@ -2062,6 +2066,8 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 4c5858c1dd82..5961c569cfe0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -156,6 +156,8 @@ struct mlx5e_sw_stats {
u64 rx_gro_large_hds;
u64 rx_hds_nodata_packets;
u64 rx_hds_nodata_bytes;
+ u64 rx_hds_nosplit_packets;
+ u64 rx_hds_nosplit_bytes;
u64 rx_mcast_packets;
u64 rx_ecn_mark;
u64 rx_removed_vlan_packets;
@@ -356,6 +358,8 @@ struct mlx5e_rq_stats {
u64 gro_large_hds;
u64 hds_nodata_packets;
u64 hds_nodata_bytes;
+ u64 hds_nosplit_packets;
+ u64 hds_nosplit_bytes;
u64 mcast_packets;
u64 ecn_mark;
u64 removed_vlan_packets;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 30673292e15f..6b3b1afe8312 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1740,10 +1740,118 @@ has_encap_dests(struct mlx5_flow_attr *attr)
}
static int
+extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
+{
+ bool int_dest = false, ext_dest = false;
+ struct mlx5_esw_flow_attr *esw_attr;
+ int i;
+
+ if (flow->attr != attr ||
+ !list_is_first(&attr->list, &flow->attrs))
+ return 0;
+
+ if (flow_flag_test(flow, SLOW))
+ return 0;
+
+ esw_attr = attr->esw_attr;
+ if (!esw_attr->split_count ||
+ esw_attr->split_count == esw_attr->out_count - 1)
+ return 0;
+
+ if (esw_attr->dest_int_port &&
+ (esw_attr->dests[esw_attr->split_count].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE))
+ return esw_attr->split_count + 1;
+
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
+ /* an external dest with encap is considered internal by the firmware */
+ if (esw_attr->dests[i].vport == MLX5_VPORT_UPLINK &&
+ !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID))
+ ext_dest = true;
+ else
+ int_dest = true;
+
+ if (ext_dest && int_dest)
+ return esw_attr->split_count;
+ }
+
+ return 0;
+}
+
+static int
+extra_split_attr_dests(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr, int split_count)
+{
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5e_tc_flow_parse_attr *parse_attr, *parse_attr2;
+ struct mlx5_esw_flow_attr *esw_attr, *esw_attr2;
+ struct mlx5e_post_act_handle *handle;
+ struct mlx5_flow_attr *attr2;
+ int i, j, err;
+
+ if (IS_ERR(post_act))
+ return PTR_ERR(post_act);
+
+ attr2 = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
+ parse_attr2 = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
+ if (!attr2 || !parse_attr2) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+ attr2->parse_attr = parse_attr2;
+
+ handle = mlx5e_tc_post_act_add(post_act, attr2);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto err_free;
+ }
+
+ esw_attr = attr->esw_attr;
+ esw_attr2 = attr2->esw_attr;
+ esw_attr2->in_rep = esw_attr->in_rep;
+
+ parse_attr = attr->parse_attr;
+ parse_attr2->filter_dev = parse_attr->filter_dev;
+
+ for (i = split_count, j = 0; i < esw_attr->out_count; i++, j++)
+ esw_attr2->dests[j] = esw_attr->dests[i];
+
+ esw_attr2->out_count = j;
+ attr2->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ err = mlx5e_tc_post_act_offload(post_act, handle);
+ if (err)
+ goto err_post_act_offload;
+
+ err = mlx5e_tc_post_act_set_handle(flow->priv->mdev, handle,
+ &parse_attr->mod_hdr_acts);
+ if (err)
+ goto err_post_act_set_handle;
+
+ esw_attr->out_count = split_count;
+ attr->extra_split_ft = mlx5e_tc_post_act_get_ft(post_act);
+ flow->extra_split_attr = attr2;
+
+ attr2->post_act_handle = handle;
+
+ return 0;
+
+err_post_act_set_handle:
+ mlx5e_tc_post_act_unoffload(post_act, handle);
+err_post_act_offload:
+ mlx5e_tc_post_act_del(post_act, handle);
+err_free:
+ kvfree(parse_attr2);
+ kfree(attr2);
+ return err;
+}
+
+static int
post_process_attr(struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
struct netlink_ext_ack *extack)
{
+ int extra_split;
bool vf_tun;
int err = 0;
@@ -1757,6 +1865,13 @@ post_process_attr(struct mlx5e_tc_flow *flow,
goto err_out;
}
+ extra_split = extra_split_attr_dests_needed(flow, attr);
+ if (extra_split > 0) {
+ err = extra_split_attr_dests(flow, attr, extra_split);
+ if (err)
+ goto err_out;
+ }
+
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
if (err)
@@ -1971,6 +2086,11 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow);
free_flow_post_acts(flow);
+ if (flow->extra_split_attr) {
+ mlx5_free_flow_attr_actions(flow, flow->extra_split_attr);
+ kvfree(flow->extra_split_attr->parse_attr);
+ kfree(flow->extra_split_attr);
+ }
mlx5_free_flow_attr_actions(flow, attr);
kvfree(attr->esw_attr->rx_tun_attr);
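Note: in miniature, the destination split performed by extra_split_attr_dests() above moves entries from split_count onward into a second attribute and truncates the first (plain int arrays stand in for the esw attr structures):
/* Move dests[split .. *count - 1] into dests2 and truncate the original,
 * mirroring the j-indexed copy loop in extra_split_attr_dests(). */
static int toy_split_dests(int *dests, int *count, int split, int *dests2)
{
	int i, j = 0;

	for (i = split; i < *count; i++, j++)
		dests2[j] = dests[i];
	*count = split;
	return j; /* number of dests handled by the post-action attr */
}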
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index c24bda56b2b5..e1b8cb78369f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -86,6 +86,7 @@ struct mlx5_flow_attr {
u32 dest_chain;
struct mlx5_flow_table *ft;
struct mlx5_flow_table *dest_ft;
+ struct mlx5_flow_table *extra_split_ft;
u8 inner_match_level;
u8 outer_match_level;
u8 tun_ip_version;
@@ -139,7 +140,7 @@ struct mlx5_rx_tun_attr {
#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
#define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)
-#define MLX5E_TC_MAX_INT_PORT_NUM (8)
+#define MLX5E_TC_MAX_INT_PORT_NUM (32)
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index b09e9abd39f3..f8c7912abe0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -642,7 +642,6 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
return;
err_unmap:
- mlx5e_dma_unmap_wqe_err(sq, 1);
sq->stats->dropped++;
dev_kfree_skb_any(skb);
mlx5e_tx_flush(sq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index cb7e7e4104af..68cb86b37e56 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -835,28 +835,9 @@ static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx)
mlx5_irq_release_vector(irq);
}
-static int mlx5_cpumask_default_spread(int numa_node, int index)
+static int mlx5_cpumask_default_spread(struct mlx5_core_dev *dev, int index)
{
- const struct cpumask *prev = cpu_none_mask;
- const struct cpumask *mask;
- int found_cpu = 0;
- int i = 0;
- int cpu;
-
- rcu_read_lock();
- for_each_numa_hop_mask(mask, numa_node) {
- for_each_cpu_andnot(cpu, mask, prev) {
- if (i++ == index) {
- found_cpu = cpu;
- goto spread_done;
- }
- }
- prev = mask;
- }
-
-spread_done:
- rcu_read_unlock();
- return found_cpu;
+ return cpumask_local_spread(index, dev->priv.numa_node);
}
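Note: cpumask_local_spread(i, node) is the stock kernel helper that returns the i-th online CPU with NUMA-local CPUs preferred, which is what the removed open-coded hop walk computed; a crude user-space stand-in (illustrative only):
/* Given CPUs pre-sorted by NUMA distance from the device's node, return
 * the index'th one, wrapping over the online list. */
static int toy_local_spread(const int *cpus_by_locality, int n_online, int index)
{
	return cpus_by_locality[index % n_online];
}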
static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
@@ -880,7 +861,7 @@ static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
int cpu;
rmap = mlx5_eq_table_get_pci_rmap(dev);
- cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx);
+ cpu = mlx5_cpumask_default_spread(dev, vecidx);
irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
if (IS_ERR(irq))
return PTR_ERR(irq);
@@ -915,7 +896,7 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
if (!mlx5_irq_pool_is_sf_pool(pool))
return comp_irq_request_pci(dev, vecidx);
- af_desc.is_managed = 1;
+ af_desc.is_managed = false;
cpumask_copy(&af_desc.mask, cpu_online_mask);
cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
@@ -1080,6 +1061,12 @@ int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
struct mlx5_eq_comp *eq;
int ret = 0;
+ if (vecidx >= table->max_comp_eqs) {
+ mlx5_core_dbg(dev, "Requested vector index %u should be less than %u",
+ vecidx, table->max_comp_eqs);
+ return -EINVAL;
+ }
+
mutex_lock(&table->comp_lock);
eq = xa_load(&table->comp_eqs, vecidx);
if (eq) {
@@ -1145,7 +1132,7 @@ int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
if (mask)
cpu = cpumask_first(mask);
else
- cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vector);
+ cpu = mlx5_cpumask_default_spread(dev, vector);
return cpu;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 255bc8b749f9..8587cd572da5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -319,7 +319,7 @@ int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
return -EPERM;
mutex_lock(&esw->state_lock);
- if (esw->mode != MLX5_ESWITCH_LEGACY) {
+ if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw)) {
err = -EOPNOTSUPP;
goto out;
}
@@ -339,7 +339,7 @@ int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
if (!mlx5_esw_allowed(esw))
return -EPERM;
- if (esw->mode != MLX5_ESWITCH_LEGACY)
+ if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw))
return -EOPNOTSUPP;
*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 20146a2dc7f4..02a3563f51ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -312,6 +312,25 @@ static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw,
return err;
}
+static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
+{
+ switch (type) {
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_TSAR;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_VPORT;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+ }
+ return false;
+}
+
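Note: the helper above is the usual capability-bitmask pattern, one bit per schedulable element type; reduced to its essentials (names invented):
#include <stdbool.h>
#include <stdint.h>
/* A type is usable only if the firmware advertises its capability bit. */
static bool toy_element_supported(uint32_t cap_mask, uint32_t type_bit)
{
	return (cap_mask & type_bit) != 0;
}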
static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
struct mlx5_vport *vport,
u32 max_rate, u32 bw_share)
@@ -323,6 +342,9 @@ static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
void *vport_elem;
int err;
+ if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT))
+ return -EOPNOTSUPP;
+
parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
@@ -421,6 +443,7 @@ __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_esw_rate_group *group;
+ __be32 *attr;
u32 divider;
int err;
@@ -428,6 +451,12 @@ __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex
if (!group)
return ERR_PTR(-ENOMEM);
+ MLX5_SET(scheduling_context, tsar_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+
+ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+ *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
+
MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
esw->qos.root_tsar_ix);
err = mlx5_create_scheduling_element_cmd(esw->dev,
@@ -526,25 +555,6 @@ static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
return err;
}
-static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
-{
- switch (type) {
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_TSAR;
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_VPORT;
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_VPORT_TC;
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
- }
- return false;
-}
-
static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
@@ -555,7 +565,8 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
return -EOPNOTSUPP;
- if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
+ if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR) ||
+ !(MLX5_CAP_QOS(dev, esw_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
return -EOPNOTSUPP;
MLX5_SET(scheduling_context, tsar_ctx, element_type,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 17f78091ad30..7aef30dbd82d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1489,7 +1489,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
}
if (err)
- goto abort;
+ goto err_esw_enable;
esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
@@ -1503,7 +1503,8 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
return 0;
-abort:
+err_esw_enable:
+ mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
mlx5_esw_acls_ns_cleanup(esw);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 578466d69f21..f44b4c7ebcfd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -887,9 +887,6 @@ int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_v
bool enable);
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
u16 vport_num);
-void mlx5_esw_vport_ipsec_offload_enable(struct mlx5_eswitch *esw);
-void mlx5_esw_vport_ipsec_offload_disable(struct mlx5_eswitch *esw);
-
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 768199d2255a..f24f91d213f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -613,6 +613,13 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
}
}
+ if (attr->extra_split_ft) {
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[*i].ft = attr->extra_split_ft;
+ (*i)++;
+ }
+
out:
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 9b8599c200e2..676005854dad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -463,7 +463,7 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
int num_encap = 0;
*extended_dest = false;
- if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+ if (!(fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
@@ -502,17 +502,17 @@ mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
execute_aso[0]);
MLX5_SET(execute_aso, execute_aso, valid, 1);
MLX5_SET(execute_aso, execute_aso, aso_object_id,
- fte->action.exe_aso.object_id);
+ fte->act_dests.action.exe_aso.object_id);
exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
- fte->action.exe_aso.return_reg_id);
+ fte->act_dests.action.exe_aso.return_reg_id);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
- fte->action.exe_aso.type);
+ fte->act_dests.action.exe_aso.type);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
- fte->action.exe_aso.flow_meter.init_color);
+ fte->act_dests.action.exe_aso.flow_meter.init_color);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
- fte->action.exe_aso.flow_meter.meter_idx);
+ fte->act_dests.action.exe_aso.flow_meter.meter_idx);
}
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
@@ -541,7 +541,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
else
dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
- inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
+ inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->act_dests.dests_size * dst_cnt_size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -553,7 +553,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
MLX5_SET(set_fte_in, in, ignore_flow_level,
- !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
+ !!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport,
@@ -563,23 +563,23 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag,
- fte->flow_context.flow_tag);
+ fte->act_dests.flow_context.flow_tag);
MLX5_SET(flow_context, in_flow_context, flow_source,
- fte->flow_context.flow_source);
+ fte->act_dests.flow_context.flow_source);
MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
- !!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
+ !!(fte->act_dests.flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
MLX5_SET(flow_context, in_flow_context, extended_destination,
extended_dest);
- action = fte->action.action;
+ action = fte->act_dests.action.action;
if (extended_dest)
action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
MLX5_SET(flow_context, in_flow_context, action, action);
- if (!extended_dest && fte->action.pkt_reformat) {
- struct mlx5_pkt_reformat *pkt_reformat = fte->action.pkt_reformat;
+ if (!extended_dest && fte->act_dests.action.pkt_reformat) {
+ struct mlx5_pkt_reformat *pkt_reformat = fte->act_dests.action.pkt_reformat;
if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat);
@@ -591,46 +591,46 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
goto err_out;
}
} else {
- reformat_id = fte->action.pkt_reformat->id;
+ reformat_id = fte->act_dests.action.pkt_reformat->id;
}
}
MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id);
- if (fte->action.modify_hdr) {
- if (fte->action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
+ if (fte->act_dests.action.modify_hdr) {
+ if (fte->act_dests.action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
mlx5_core_err(dev, "Can't use SW-owned modify_hdr in FW-owned table\n");
err = -EOPNOTSUPP;
goto err_out;
}
MLX5_SET(flow_context, in_flow_context, modify_header_id,
- fte->action.modify_hdr->id);
+ fte->act_dests.action.modify_hdr->id);
}
MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type,
- fte->action.crypto.type);
+ fte->act_dests.action.crypto.type);
MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id,
- fte->action.crypto.obj_id);
+ fte->act_dests.action.crypto.obj_id);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
- MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
- MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
- MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
+ MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[0].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[0].vid);
+ MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[0].prio);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
- MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
- MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
- MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
+ MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[1].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[1].vid);
+ MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[1].prio);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, sizeof(fte->val));
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
int list_size = 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
@@ -706,7 +706,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
log_max_flow_counter,
ft->type));
@@ -731,8 +731,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
- if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ if (fte->act_dests.action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
} else {
err = -EOPNOTSUPP;
@@ -1071,7 +1071,7 @@ static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns,
enum fs_flow_table_type ft_type)
{
- return 0;
+ return MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH;
}
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 53e0e5137d3f..7eb7b3ffe3d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -124,4 +124,12 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode);
int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect);
+
+static inline bool mlx5_fs_cmd_is_fw_term_table(struct mlx5_flow_table *ft)
+{
+ if (ft->flags & MLX5_FLOW_TABLE_TERMINATION)
+ return true;
+
+ return false;
+}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index a47d6419160d..8505d5e241e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -605,12 +605,37 @@ static void modify_fte(struct fs_fte *fte)
dev = get_dev(&fte->node);
root = find_root(&ft->node);
- err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
+ err = root->cmds->update_fte(root, ft, fg, fte->act_dests.modify_mask, fte);
if (err)
mlx5_core_warn(dev,
"%s can't del rule fg id=%d fte_index=%d\n",
__func__, fg->id, fte->index);
- fte->modify_mask = 0;
+ fte->act_dests.modify_mask = 0;
+}
+
+static void del_sw_hw_dup_rule(struct fs_node *node)
+{
+ struct mlx5_flow_rule *rule;
+ struct fs_fte *fte;
+
+ fs_get_obj(rule, node);
+ fs_get_obj(fte, rule->node.parent);
+ trace_mlx5_fs_del_rule(rule);
+
+ if (is_fwd_next_action(rule->sw_action)) {
+ mutex_lock(&rule->dest_attr.ft->lock);
+ list_del(&rule->next_ft);
+ mutex_unlock(&rule->dest_attr.ft->lock);
+ }
+
+ /* If a pending rule is being deleted, it is a NO APPEND rule, so
+ * there are no partial deletions: all the rules of this
+ * mlx5_flow_handle will be deleted, and they are not shared with any
+ * other mlx5_flow_handle instance, so none of the bookkeeping done in
+ * del_sw_hw_rule() is needed here.
+ */
+
+ kfree(rule);
}
static void del_sw_hw_rule(struct fs_node *node)
@@ -628,29 +653,29 @@ static void del_sw_hw_rule(struct fs_node *node)
}
if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) {
- --fte->dests_size;
- fte->modify_mask |=
+ --fte->act_dests.dests_size;
+ fte->act_dests.modify_mask |=
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
- fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
goto out;
}
if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
- --fte->dests_size;
- fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
- fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ --fte->act_dests.dests_size;
+ fte->act_dests.modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+ fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
goto out;
}
if (is_fwd_dest_type(rule->dest_attr.type)) {
- --fte->dests_size;
- --fte->fwd_dests;
+ --fte->act_dests.dests_size;
+ --fte->act_dests.fwd_dests;
- if (!fte->fwd_dests)
- fte->action.action &=
+ if (!fte->act_dests.fwd_dests)
+ fte->act_dests.action.action &=
~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- fte->modify_mask |=
+ fte->act_dests.modify_mask |=
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
goto out;
}
@@ -658,12 +683,33 @@ out:
kfree(rule);
}
+static void switch_to_pending_act_dests(struct fs_fte *fte)
+{
+ struct fs_node *iter;
+
+ memcpy(&fte->act_dests, &fte->dup->act_dests, sizeof(fte->act_dests));
+
+ list_bulk_move_tail(&fte->node.children,
+ fte->dup->children.next,
+ fte->dup->children.prev);
+
+ list_for_each_entry(iter, &fte->node.children, list)
+ iter->del_sw_func = del_sw_hw_rule;
+
+ /* Take a reference so the fte isn't deleted when
+ * mlx5_del_flow_rules() drops its refcount to trigger
+ * deletion.
+ */
+ tree_get_node(&fte->node);
+}
+
static void del_hw_fte(struct fs_node *node)
{
struct mlx5_flow_root_namespace *root;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
struct mlx5_core_dev *dev;
+ bool pending_used = false;
struct fs_fte *fte;
int err;
@@ -672,16 +718,35 @@ static void del_hw_fte(struct fs_node *node)
fs_get_obj(ft, fg->node.parent);
trace_mlx5_fs_del_fte(fte);
- WARN_ON(fte->dests_size);
+ WARN_ON(fte->act_dests.dests_size);
dev = get_dev(&ft->node);
root = find_root(&ft->node);
+
+ if (fte->dup && !list_empty(&fte->dup->children)) {
+ switch_to_pending_act_dests(fte);
+ pending_used = true;
+ } else {
+ /* Avoid double call to del_hw_fte */
+ node->del_hw_func = NULL;
+ }
+
if (node->active) {
- err = root->cmds->delete_fte(root, ft, fte);
- if (err)
- mlx5_core_warn(dev,
- "flow steering can't delete fte in index %d of flow group id %d\n",
- fte->index, fg->id);
- node->active = false;
+ if (pending_used) {
+ err = root->cmds->update_fte(root, ft, fg,
+ fte->act_dests.modify_mask, fte);
+ if (err)
+ mlx5_core_warn(dev,
+ "flow steering can't update to pending rule in index %d of flow group id %d\n",
+ fte->index, fg->id);
+ fte->act_dests.modify_mask = 0;
+ } else {
+ err = root->cmds->delete_fte(root, ft, fte);
+ if (err)
+ mlx5_core_warn(dev,
+ "flow steering can't delete fte in index %d of flow group id %d\n",
+ fte->index, fg->id);
+ node->active = false;
+ }
}
}
@@ -700,6 +765,7 @@ static void del_sw_fte(struct fs_node *node)
rhash_fte);
WARN_ON(err);
ida_free(&fg->fte_allocator, fte->index - fg->start_index);
+ kvfree(fte->dup);
kmem_cache_free(steering->ftes_cache, fte);
}
@@ -782,8 +848,8 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
memcpy(fte->val, &spec->match_value, sizeof(fte->val));
fte->node.type = FS_TYPE_FLOW_ENTRY;
- fte->action = *flow_act;
- fte->flow_context = spec->flow_context;
+ fte->act_dests.action = *flow_act;
+ fte->act_dests.flow_context = spec->flow_context;
tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
@@ -1103,18 +1169,45 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
return err;
}
+static bool rule_is_pending(struct fs_fte *fte, struct mlx5_flow_rule *rule)
+{
+ struct mlx5_flow_rule *tmp_rule;
+ struct fs_node *iter;
+
+ if (!fte->dup || list_empty(&fte->dup->children))
+ return false;
+
+ list_for_each_entry(iter, &fte->dup->children, list) {
+ tmp_rule = container_of(iter, struct mlx5_flow_rule, node);
+
+ if (tmp_rule == rule)
+ return true;
+ }
+
+ return false;
+}
+
static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
struct mlx5_flow_destination *dest)
{
struct mlx5_flow_root_namespace *root;
+ struct fs_fte_action *act_dests;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
+ bool pending = false;
struct fs_fte *fte;
int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
int err = 0;
fs_get_obj(fte, rule->node.parent);
- if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+
+ pending = rule_is_pending(fte, rule);
+ if (pending)
+ act_dests = &fte->dup->act_dests;
+ else
+ act_dests = &fte->act_dests;
+
+ if (!(act_dests->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return -EINVAL;
down_write_ref_node(&fte->node, false);
fs_get_obj(fg, fte->node.parent);
@@ -1122,8 +1215,9 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
memcpy(&rule->dest_attr, dest, sizeof(*dest));
root = find_root(&ft->node);
- err = root->cmds->update_fte(root, ft, fg,
- modify_mask, fte);
+ if (!pending)
+ err = root->cmds->update_fte(root, ft, fg,
+ modify_mask, fte);
up_write_ref_node(&fte->node, false);
return err;
@@ -1453,6 +1547,16 @@ static struct mlx5_flow_handle *alloc_handle(int num_rules)
return handle;
}
+static void destroy_flow_handle_dup(struct mlx5_flow_handle *handle,
+ int i)
+{
+ for (; --i >= 0;) {
+ list_del(&handle->rule[i]->node.list);
+ kfree(handle->rule[i]);
+ }
+ kfree(handle);
+}
+
static void destroy_flow_handle(struct fs_fte *fte,
struct mlx5_flow_handle *handle,
struct mlx5_flow_destination *dest,
@@ -1460,7 +1564,7 @@ static void destroy_flow_handle(struct fs_fte *fte,
{
for (; --i >= 0;) {
if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
- fte->dests_size--;
+ fte->act_dests.dests_size--;
list_del(&handle->rule[i]->node.list);
kfree(handle->rule[i]);
}
@@ -1469,6 +1573,61 @@ static void destroy_flow_handle(struct fs_fte *fte,
}
static struct mlx5_flow_handle *
+create_flow_handle_dup(struct list_head *children,
+ struct mlx5_flow_destination *dest,
+ int dest_num,
+ struct fs_fte_action *act_dests)
+{
+ static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+ static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+ struct mlx5_flow_rule *rule = NULL;
+ struct mlx5_flow_handle *handle;
+ int i = 0;
+ int type;
+
+ handle = alloc_handle((dest_num) ? dest_num : 1);
+ if (!handle)
+ return NULL;
+
+ do {
+ rule = alloc_rule(dest + i);
+ if (!rule)
+ goto free_rules;
+
+ /* Add the dest to the dests list - flow tables must sit at the
+ * end of the list for forward-to-next-prio rules.
+ */
+ tree_init_node(&rule->node, NULL, del_sw_hw_dup_rule);
+ if (dest &&
+ dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ list_add(&rule->node.list, children);
+ else
+ list_add_tail(&rule->node.list, children);
+
+ if (dest) {
+ act_dests->dests_size++;
+
+ if (is_fwd_dest_type(dest[i].type))
+ act_dests->fwd_dests++;
+
+ type = dest[i].type ==
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ act_dests->modify_mask |= type ? count : dst;
+ }
+ handle->rule[i] = rule;
+ } while (++i < dest_num);
+
+ return handle;
+
+free_rules:
+ destroy_flow_handle_dup(handle, i);
+ act_dests->dests_size = 0;
+ act_dests->fwd_dests = 0;
+
+ return NULL;
+}
+
+static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
struct mlx5_flow_destination *dest,
int dest_num,
@@ -1510,10 +1669,10 @@ create_flow_handle(struct fs_fte *fte,
else
list_add_tail(&rule->node.list, &fte->node.children);
if (dest) {
- fte->dests_size++;
+ fte->act_dests.dests_size++;
if (is_fwd_dest_type(dest[i].type))
- fte->fwd_dests++;
+ fte->act_dests.fwd_dests++;
type = dest[i].type ==
MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -1774,17 +1933,17 @@ static int check_conflicting_ftes(struct fs_fte *fte,
const struct mlx5_flow_context *flow_context,
const struct mlx5_flow_act *flow_act)
{
- if (check_conflicting_actions(flow_act, &fte->action)) {
+ if (check_conflicting_actions(flow_act, &fte->act_dests.action)) {
mlx5_core_warn(get_dev(&fte->node),
"Found two FTEs with conflicting actions\n");
return -EEXIST;
}
if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
- fte->flow_context.flow_tag != flow_context->flow_tag) {
+ fte->act_dests.flow_context.flow_tag != flow_context->flow_tag) {
mlx5_core_warn(get_dev(&fte->node),
"FTE flow tag %u already exists with different flow tag %u\n",
- fte->flow_context.flow_tag,
+ fte->act_dests.flow_context.flow_tag,
flow_context->flow_tag);
return -EEXIST;
}
@@ -1808,12 +1967,12 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
if (ret)
return ERR_PTR(ret);
- old_action = fte->action.action;
- fte->action.action |= flow_act->action;
+ old_action = fte->act_dests.action.action;
+ fte->act_dests.action.action |= flow_act->action;
handle = add_rule_fte(fte, fg, dest, dest_num,
old_action != flow_act->action);
if (IS_ERR(handle)) {
- fte->action.action = old_action;
+ fte->act_dests.action.action = old_action;
return handle;
}
trace_mlx5_fs_set_fte(fte, false);
@@ -1961,6 +2120,62 @@ out:
return fte_tmp;
}
+/* Native capability lacks support for adding an additional match with the same value
+ * to the same flow group. To accommodate the NO APPEND flag in these scenarios,
+ * we include the new rule in the existing flow table entry (fte) without immediate
+ * hardware commitment. When a request is made to delete the corresponding hardware rule,
+ * we then commit the pending rule to hardware.
+ */
+static struct mlx5_flow_handle *
+add_rule_dup_match_fte(struct fs_fte *fte,
+ const struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int dest_num)
+{
+ struct mlx5_flow_handle *handle;
+ struct fs_fte_dup *dup;
+ int i = 0;
+
+ if (!fte->dup) {
+ dup = kvzalloc(sizeof(*dup), GFP_KERNEL);
+ if (!dup)
+ return ERR_PTR(-ENOMEM);
+ /* dup is freed when the fte is freed; this way we avoid
+ * allocating and freeing dup on every rule creation or
+ * deletion.
+ */
+ INIT_LIST_HEAD(&dup->children);
+ fte->dup = dup;
+ }
+
+ if (!list_empty(&fte->dup->children)) {
+ mlx5_core_warn(get_dev(&fte->node),
+ "Can have only a single duplicate rule\n");
+
+ return ERR_PTR(-EEXIST);
+ }
+
+ fte->dup->act_dests.action = *flow_act;
+ fte->dup->act_dests.flow_context = spec->flow_context;
+ fte->dup->act_dests.dests_size = 0;
+ fte->dup->act_dests.fwd_dests = 0;
+ fte->dup->act_dests.modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+
+ handle = create_flow_handle_dup(&fte->dup->children,
+ dest, dest_num,
+ &fte->dup->act_dests);
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < handle->num_rules; i++) {
+ tree_add_node(&handle->rule[i]->node, &fte->node);
+ trace_mlx5_fs_add_rule(handle->rule[i]);
+ }
+
+ return handle;
+}
+
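Note: a toy state model of the NO APPEND duplicate mechanism added above; the second identical match is staged in software only and is committed to hardware when the active rule is deleted (fields are schematic, not the flow-steering types):
#include <stdbool.h>
struct toy_fte {
	bool active_in_hw; /* rule currently programmed in HW */
	bool has_pending;  /* duplicate staged in SW only */
};
/* Deleting the HW rule promotes the pending duplicate instead of
 * removing the entry, mirroring the del_hw_fte() change above. */
static void toy_del_hw_rule(struct toy_fte *fte)
{
	if (fte->has_pending) {
		fte->has_pending = false;
		fte->active_in_hw = true; /* update_fte(): swap in pending */
	} else {
		fte->active_in_hw = false; /* delete_fte() */
	}
}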
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct list_head *match_head,
@@ -1971,6 +2186,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
int ft_version)
{
struct mlx5_flow_steering *steering = get_steering(&ft->node);
+ struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct mlx5_flow_group *g;
struct mlx5_flow_handle *rule;
struct match_list *iter;
@@ -1984,7 +2200,9 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
return ERR_PTR(-ENOMEM);
search_again_locked:
- if (flow_act->flags & FLOW_ACT_NO_APPEND)
+ if (flow_act->flags & FLOW_ACT_NO_APPEND &&
+ (root->cmds->get_capabilities(root, root->table_type) &
+ MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH))
goto skip_search;
version = matched_fgs_get_version(match_head);
/* Try to find an fte with identical match value and attempt update its
@@ -1997,7 +2215,10 @@ search_again_locked:
fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
if (!fte_tmp)
continue;
- rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
+ if (flow_act->flags & FLOW_ACT_NO_APPEND)
+ rule = add_rule_dup_match_fte(fte_tmp, spec, flow_act, dest, dest_num);
+ else
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
/* No error check needed here, because insert_fte() is not called */
up_write_ref_node(&fte_tmp->node, false);
tree_put_node(&fte_tmp->node, false);
@@ -2265,12 +2486,10 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
tree_remove_node(&handle->rule[i]->node, true);
if (list_empty(&fte->node.children)) {
fte->node.del_hw_func(&fte->node);
- /* Avoid double call to del_hw_fte */
- fte->node.del_hw_func = NULL;
up_write_ref_node(&fte->node, false);
tree_put_node(&fte->node, false);
- } else if (fte->dests_size) {
- if (fte->modify_mask)
+ } else if (fte->act_dests.dests_size) {
+ if (fte->act_dests.modify_mask)
modify_fte(fte);
up_write_ref_node(&fte->node, false);
} else {
@@ -3590,8 +3809,8 @@ out:
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
-static struct mlx5_flow_root_namespace
-*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
+struct mlx5_flow_root_namespace *
+mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
{
struct mlx5_flow_namespace *ns;
@@ -3614,7 +3833,7 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
struct mlx5_modify_hdr *modify_hdr;
int err;
- root = get_root_namespace(dev, ns_type);
+ root = mlx5_get_root_namespace(dev, ns_type);
if (!root)
return ERR_PTR(-EOPNOTSUPP);
@@ -3639,7 +3858,7 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root;
- root = get_root_namespace(dev, modify_hdr->ns_type);
+ root = mlx5_get_root_namespace(dev, modify_hdr->ns_type);
if (WARN_ON(!root))
return;
root->cmds->modify_header_dealloc(root, modify_hdr);
@@ -3655,7 +3874,7 @@ struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
struct mlx5_flow_root_namespace *root;
int err;
- root = get_root_namespace(dev, ns_type);
+ root = mlx5_get_root_namespace(dev, ns_type);
if (!root)
return ERR_PTR(-EOPNOTSUPP);
@@ -3681,7 +3900,7 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root;
- root = get_root_namespace(dev, pkt_reformat->ns_type);
+ root = mlx5_get_root_namespace(dev, pkt_reformat->ns_type);
if (WARN_ON(!root))
return;
root->cmds->packet_reformat_dealloc(root, pkt_reformat);
@@ -3703,7 +3922,7 @@ mlx5_create_match_definer(struct mlx5_core_dev *dev,
struct mlx5_flow_definer *definer;
int id;
- root = get_root_namespace(dev, ns_type);
+ root = mlx5_get_root_namespace(dev, ns_type);
if (!root)
return ERR_PTR(-EOPNOTSUPP);
@@ -3727,7 +3946,7 @@ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root;
- root = get_root_namespace(dev, definer->ns_type);
+ root = mlx5_get_root_namespace(dev, definer->ns_type);
if (WARN_ON(!root))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 78eb6b7097e1..964937f17cf5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -110,7 +110,9 @@ enum fs_flow_table_type {
FS_FT_RDMA_RX = 0X7,
FS_FT_RDMA_TX = 0X8,
FS_FT_PORT_SEL = 0X9,
- FS_FT_MAX_TYPE = FS_FT_PORT_SEL,
+ FS_FT_FDB_RX = 0xa,
+ FS_FT_FDB_TX = 0xb,
+ FS_FT_MAX_TYPE = FS_FT_FDB_TX,
};
enum fs_flow_table_op_mod {
@@ -131,6 +133,7 @@ enum mlx5_flow_steering_capabilty {
MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX = 1UL << 0,
MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX = 1UL << 1,
MLX5_FLOW_STEERING_CAP_MATCH_RANGES = 1UL << 2,
+ MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH = 1UL << 3,
};
struct mlx5_flow_steering {
@@ -228,20 +231,29 @@ struct mlx5_ft_underlay_qp {
MLX5_BYTE_OFF(fte_match_param, \
MLX5_FTE_MATCH_PARAM_RESERVED)))
+struct fs_fte_action {
+ int modify_mask;
+ u32 dests_size;
+ u32 fwd_dests;
+ struct mlx5_flow_context flow_context;
+ struct mlx5_flow_act action;
+};
+
+struct fs_fte_dup {
+ struct list_head children;
+ struct fs_fte_action act_dests;
+};
+
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
struct mlx5_fs_dr_rule fs_dr_rule;
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
- u32 dests_size;
- u32 fwd_dests;
+ struct fs_fte_action act_dests;
+ struct fs_fte_dup *dup;
u32 index;
- struct mlx5_flow_context flow_context;
- struct mlx5_flow_act action;
enum fs_fte_status status;
- struct mlx5_fc *counter;
struct rhash_head hash;
- int modify_mask;
};
/* Type of children is mlx5_flow_table/namespace */
@@ -368,7 +380,9 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
(type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
(type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \
(type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_PORT_SEL != FS_FT_MAX_TYPE))\
+ (type == FS_FT_FDB_RX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
+ (type == FS_FT_FDB_TX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_FDB_TX != FS_FT_MAX_TYPE))\
)
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index b61b7d966114..76ad46bf477d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -224,6 +224,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, mcam_reg)) {
mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_FIRST_128);
mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9100_0x917F);
+ mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9180_0x91FF);
}
if (MLX5_CAP_GEN(dev, qcam_reg))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index b43ca0b762c3..4f55e55ecb55 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -26,6 +26,7 @@ struct mlx5_fw_reset {
struct work_struct reset_now_work;
struct work_struct reset_abort_work;
unsigned long reset_flags;
+ u8 reset_method;
struct timer_list timer;
struct completion done;
int ret;
@@ -95,7 +96,7 @@ static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level,
}
static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
- u8 *reset_type, u8 *reset_state)
+ u8 *reset_type, u8 *reset_state, u8 *reset_method)
{
u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
@@ -111,13 +112,26 @@ static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
*reset_type = MLX5_GET(mfrl_reg, out, reset_type);
if (reset_state)
*reset_state = MLX5_GET(mfrl_reg, out, reset_state);
+ if (reset_method)
+ *reset_method = MLX5_GET(mfrl_reg, out, pci_reset_req_method);
return 0;
}
int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
{
- return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL);
+ return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL, NULL);
+}
+
+static int mlx5_fw_reset_get_reset_method(struct mlx5_core_dev *dev,
+ u8 *reset_method)
+{
+ if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method)) {
+ *reset_method = MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE;
+ return 0;
+ }
+
+ return mlx5_reg_mfrl_query(dev, NULL, NULL, NULL, reset_method);
}
static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
@@ -125,7 +139,7 @@ static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
{
u8 reset_state;
- if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state))
+ if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state, NULL))
goto out;
if (!reset_state)
@@ -398,7 +412,8 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
return 0;
}
-static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
+static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
+ u8 reset_method)
{
u16 dev_id;
int err;
@@ -409,9 +424,11 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
}
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
- err = mlx5_check_hotplug_interrupt(dev);
- if (err)
- return false;
+ if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) {
+ err = mlx5_check_hotplug_interrupt(dev);
+ if (err)
+ return false;
+ }
#endif
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
@@ -427,8 +444,12 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
struct mlx5_core_dev *dev = fw_reset->dev;
int err;
- if (test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) ||
- !mlx5_is_reset_now_capable(dev)) {
+ err = mlx5_fw_reset_get_reset_method(dev, &fw_reset->reset_method);
+ if (err)
+ mlx5_core_warn(dev, "Failed reading MFRL, err %d\n", err);
+
+ if (err || test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) ||
+ !mlx5_is_reset_now_capable(dev, fw_reset->reset_method)) {
err = mlx5_fw_reset_set_reset_sync_nack(dev);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Nack %s",
err ? "Failed" : "Sent");
@@ -444,21 +465,15 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. Device reset is expected.\n");
}
-static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
+static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev, u16 dev_id)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
struct pci_dev *bridge = bridge_bus->self;
unsigned long timeout;
struct pci_dev *sdev;
- u16 reg16, dev_id;
int cap, err;
+ u16 reg16;
- err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
- if (err)
- return pcibios_err_to_errno(err);
- err = mlx5_check_dev_ids(dev, dev_id);
- if (err)
- return err;
cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
if (!cap)
return -EOPNOTSUPP;
@@ -528,6 +543,44 @@ restore:
return err;
}
+static int mlx5_pci_reset_bus(struct mlx5_core_dev *dev)
+{
+ if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method))
+ return -EOPNOTSUPP;
+
+ return pci_reset_bus(dev->pdev);
+}
+
+static int mlx5_sync_pci_reset(struct mlx5_core_dev *dev, u8 reset_method)
+{
+ u16 dev_id;
+ int err;
+
+ err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
+ if (err)
+ return pcibios_err_to_errno(err);
+ err = mlx5_check_dev_ids(dev, dev_id);
+ if (err)
+ return err;
+
+ switch (reset_method) {
+ case MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE:
+ err = mlx5_pci_link_toggle(dev, dev_id);
+ if (err)
+ mlx5_core_warn(dev, "mlx5_pci_link_toggle failed\n");
+ break;
+ case MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET:
+ err = mlx5_pci_reset_bus(dev);
+ if (err)
+ mlx5_core_warn(dev, "mlx5_pci_reset_bus failed\n");
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
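Note: the method selection added above reduces to a fallback rule; schematically (enum values invented for illustration):
#include <stdbool.h>
enum toy_reset_method { TOY_RESET_LINK_TOGGLE, TOY_RESET_HOT };
/* Without the hot-reset capability, always use link toggle; otherwise
 * honor the method the MFRL register reported. */
static enum toy_reset_method
toy_pick_reset_method(bool hotreset_cap, enum toy_reset_method mfrl_method)
{
	return hotreset_cap ? mfrl_method : TOY_RESET_LINK_TOGGLE;
}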
static void mlx5_sync_reset_now_event(struct work_struct *work)
{
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
@@ -546,9 +599,9 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
goto done;
}
- err = mlx5_pci_link_toggle(dev);
+ err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
if (err) {
- mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err);
+ mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, no reset done, err %d\n", err);
set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags);
}
@@ -610,9 +663,9 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n", rst_state);
if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
- err = mlx5_pci_link_toggle(dev);
+ err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
if (err) {
- mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, err %d\n", err);
+ mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n", err);
fw_reset->ret = err;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 26f8a11b8906..9772327d5124 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -74,7 +74,7 @@ static int mlx5i_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
- return mlx5e_ethtool_set_ringparam(priv, param);
+ return mlx5e_ethtool_set_ringparam(priv, param, extack);
}
static void mlx5i_get_ringparam(struct net_device *dev,
@@ -132,7 +132,7 @@ static int mlx5i_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack);
}
static int mlx5i_get_ts_info(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index cf8045b92689..8577db3308cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -445,6 +445,34 @@ static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
}
+static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev *dev)
+{
+ struct net_device *ndev = NULL;
+ struct mlx5_lag *ldev;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(dev);
+
+ if (!ldev)
+ goto unlock;
+
+ for (i = 0; i < ldev->ports; i++)
+ if (ldev->tracker.netdev_state[i].tx_enabled)
+ ndev = ldev->pf[i].netdev;
+ if (!ndev)
+ ndev = ldev->pf[ldev->ports - 1].netdev;
+
+ if (ndev)
+ dev_hold(ndev);
+
+unlock:
+ spin_unlock_irqrestore(&lag_lock, flags);
+
+ return ndev;
+}
+
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
@@ -477,9 +505,18 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
}
}
- if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
- !(ldev->mode == MLX5_LAG_MODE_ROCE))
- mlx5_lag_drop_rule_setup(ldev, tracker);
+ if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+ struct net_device *ndev = mlx5_lag_active_backup_get_netdev(dev0);
+
+		if (ldev->mode != MLX5_LAG_MODE_ROCE)
+ mlx5_lag_drop_rule_setup(ldev, tracker);
+		/* Only SR-IOV and RoCE LAG should have tracker->tx_type set,
+		 * so there is no need to check the mode.
+		 */
+ blocking_notifier_call_chain(&dev0->priv.lag_nh,
+ MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE,
+ ndev);
+ }
}
static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
@@ -613,6 +650,7 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
mlx5_core_err(dev0,
"Failed to deactivate RoCE LAG; driver restart required\n");
}
+ BLOCKING_INIT_NOTIFIER_HEAD(&dev0->priv.lag_nh);
return err;
}
@@ -1492,38 +1530,6 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
mlx5_queue_bond_work(ldev, 0);
}
-struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
-{
- struct net_device *ndev = NULL;
- struct mlx5_lag *ldev;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&lag_lock, flags);
- ldev = mlx5_lag_dev(dev);
-
- if (!(ldev && __mlx5_lag_is_roce(ldev)))
- goto unlock;
-
- if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
- for (i = 0; i < ldev->ports; i++)
- if (ldev->tracker.netdev_state[i].tx_enabled)
- ndev = ldev->pf[i].netdev;
- if (!ndev)
- ndev = ldev->pf[ldev->ports - 1].netdev;
- } else {
- ndev = ldev->pf[MLX5_LAG_P1].netdev;
- }
- if (ndev)
- dev_hold(ndev);
-
-unlock:
- spin_unlock_irqrestore(&lag_lock, flags);
-
- return ndev;
-}
-EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
-
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave)
{
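
The exported mlx5_lag_get_roce_netdev() getter removed above is superseded by pushing the active-backup lower-state change through the new lag_nh blocking notifier chain. A hedged sketch of how a consumer (for example an RDMA driver) might subscribe; the callback name and print are illustrative assumptions:

/* Hypothetical subscriber to the lag_nh chain added in this patch. */
static int my_lag_event(struct notifier_block *nb, unsigned long event,
			void *data)
{
	struct net_device *ndev = data;

	if (event == MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE)
		pr_info("active-backup tx lower dev: %s\n",
			ndev ? netdev_name(ndev) : "(none)");
	return NOTIFY_OK;
}

static struct notifier_block my_lag_nb = { .notifier_call = my_lag_event };

/* e.g. at probe: blocking_notifier_chain_register(&mdev->priv.lag_nh, &my_lag_nb); */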
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 0361741632a6..b306ae79bf97 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -38,6 +38,10 @@
#include "lib/eq.h"
#include "en.h"
#include "clock.h"
+#ifdef CONFIG_X86
+#include <linux/timekeeping.h>
+#include <linux/cpufeature.h>
+#endif /* CONFIG_X86 */
enum {
MLX5_PIN_MODE_IN = 0x0,
@@ -148,6 +152,87 @@ static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
MLX5_REG_MTUTC, 0, 1);
}
+#ifdef CONFIG_X86
+static bool mlx5_is_ptm_source_time_available(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(mtptm_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtptm_reg)] = {0};
+ int err;
+
+ if (!MLX5_CAP_MCAM_REG3(dev, mtptm))
+ return false;
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTPTM,
+ 0, 0);
+ if (err)
+ return false;
+
+ return !!MLX5_GET(mtptm_reg, out, psta);
+}
+
+static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
+ struct system_counterval_t *sys_counterval,
+ void *ctx)
+{
+ u32 out[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
+ struct mlx5_core_dev *mdev = ctx;
+ bool real_time_mode;
+ u64 host, device;
+ int err;
+
+ real_time_mode = mlx5_real_time_mode(mdev);
+
+ MLX5_SET(mtctr_reg, in, first_clock_timestamp_request,
+ MLX5_MTCTR_REQUEST_PTM_ROOT_CLOCK);
+ MLX5_SET(mtctr_reg, in, second_clock_timestamp_request,
+ real_time_mode ? MLX5_MTCTR_REQUEST_REAL_TIME_CLOCK :
+ MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER);
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTCTR,
+ 0, 0);
+ if (err)
+ return err;
+
+ if (!MLX5_GET(mtctr_reg, out, first_clock_valid) ||
+ !MLX5_GET(mtctr_reg, out, second_clock_valid))
+ return -EINVAL;
+
+ host = MLX5_GET64(mtctr_reg, out, first_clock_timestamp);
+ *sys_counterval = (struct system_counterval_t) {
+ .cycles = host,
+ .cs_id = CSID_X86_ART,
+ .use_nsecs = true,
+ };
+
+ device = MLX5_GET64(mtctr_reg, out, second_clock_timestamp);
+ if (real_time_mode)
+ *device_time = ns_to_ktime(REAL_TIME_TO_NS(device >> 32, device & U32_MAX));
+ else
+ *device_time = mlx5_timecounter_cyc2time(&mdev->clock, device);
+
+ return 0;
+}
+
+static int mlx5_ptp_getcrosststamp(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *cts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct system_time_snapshot history_begin = {0};
+ struct mlx5_core_dev *mdev;
+
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
+
+ if (!mlx5_is_ptm_source_time_available(mdev))
+ return -EBUSY;
+
+ ktime_get_snapshot(&history_begin);
+
+ return get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
+ &history_begin, cts);
+}
+#endif /* CONFIG_X86 */
+
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
struct ptp_system_timestamp *sts,
bool real_time)
@@ -1034,6 +1119,12 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
if (MLX5_CAP_MCAM_REG(mdev, mtutc))
mlx5_init_timer_max_freq_adjustment(mdev);
+#ifdef CONFIG_X86
+ if (MLX5_CAP_MCAM_REG3(mdev, mtptm) &&
+ MLX5_CAP_MCAM_REG3(mdev, mtctr) && boot_cpu_has(X86_FEATURE_ART))
+ clock->ptp_info.getcrosststamp = mlx5_ptp_getcrosststamp;
+#endif /* CONFIG_X86 */
+
mlx5_timecounter_init(mdev);
mlx5_init_clock_info(mdev);
mlx5_init_overflow_period(clock);
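
With .getcrosststamp wired up, a PTM-capable system can request an atomic device/system cross-timestamp from userspace through the standard PTP character device. A small sketch, assuming the clock is exposed as /dev/ptp0:

/* Userspace sketch: PTP_SYS_OFFSET_PRECISE returns a device timestamp and
 * the correlated system clocks captured via the ART-based crosststamp path.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise off = { 0 };
	int fd = open("/dev/ptp0", O_RDONLY);	/* device node is an example */

	if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off) < 0) {
		perror("PTP_SYS_OFFSET_PRECISE");
		return 1;
	}
	printf("device %lld.%09u realtime %lld.%09u\n",
	       (long long)off.device.sec, off.device.nsec,
	       (long long)off.sys_realtime.sec, off.sys_realtime.nsec);
	return 0;
}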
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
index 234cd00f71a1..b7d4b1a2baf2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
@@ -386,7 +386,8 @@ static int ipsec_fs_roce_tx_mpv_create(struct mlx5_core_dev *mdev,
return -EOPNOTSUPP;
peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
- if (!peer_priv) {
+ if (!peer_priv || !peer_priv->ipsec) {
+ mlx5_core_err(mdev, "IPsec not supported on master device\n");
err = -EOPNOTSUPP;
goto release_peer;
}
@@ -455,7 +456,8 @@ static int ipsec_fs_roce_rx_mpv_create(struct mlx5_core_dev *mdev,
return -EOPNOTSUPP;
peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
- if (!peer_priv) {
+ if (!peer_priv || !peer_priv->ipsec) {
+ mlx5_core_err(mdev, "IPsec not supported on master device\n");
err = -EOPNOTSUPP;
goto release_peer;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
index d0b595ba6110..432c98f2626d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
@@ -24,6 +24,11 @@
pci_write_config_dword((dev)->pdev, (dev)->vsc_addr + (offset), (val))
#define VSC_MAX_RETRIES 2048
+/* Reading VSC registers can take a relatively long time.
+ * Yield the CPU every 128 registers read.
+ */
+#define VSC_GW_READ_BLOCK_COUNT 128
+
enum {
VSC_CTRL_OFFSET = 0x4,
VSC_COUNTER_OFFSET = 0x8,
@@ -273,6 +278,7 @@ int mlx5_vsc_gw_read_block_fast(struct mlx5_core_dev *dev, u32 *data,
{
unsigned int next_read_addr = 0;
unsigned int read_addr = 0;
+ unsigned int count = 0;
while (read_addr < length) {
if (mlx5_vsc_gw_read_fast(dev, read_addr, &next_read_addr,
@@ -280,6 +286,10 @@ int mlx5_vsc_gw_read_block_fast(struct mlx5_core_dev *dev, u32 *data,
return read_addr;
read_addr = next_read_addr;
+ if (++count == VSC_GW_READ_BLOCK_COUNT) {
+ cond_resched();
+ count = 0;
+ }
}
return length;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index f6deb5a3f820..eeb0b7ea05f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -126,7 +126,7 @@ static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
}
static int mlx5_query_sd(struct mlx5_core_dev *dev, bool *sdm,
- u8 *host_buses, u8 *sd_group)
+ u8 *host_buses)
{
u32 out[MLX5_ST_SZ_DW(mpir_reg)];
int err;
@@ -135,10 +135,6 @@ static int mlx5_query_sd(struct mlx5_core_dev *dev, bool *sdm,
if (err)
return err;
- err = mlx5_query_nic_vport_sd_group(dev, sd_group);
- if (err)
- return err;
-
*sdm = MLX5_GET(mpir_reg, out, sdm);
*host_buses = MLX5_GET(mpir_reg, out, host_buses);
@@ -166,19 +162,23 @@ static int sd_init(struct mlx5_core_dev *dev)
if (mlx5_core_is_ecpf(dev))
return 0;
+ err = mlx5_query_nic_vport_sd_group(dev, &sd_group);
+ if (err)
+ return err;
+
+ if (!sd_group)
+ return 0;
+
if (!MLX5_CAP_MCAM_REG(dev, mpir))
return 0;
- err = mlx5_query_sd(dev, &sdm, &host_buses, &sd_group);
+ err = mlx5_query_sd(dev, &sdm, &host_buses);
if (err)
return err;
if (!sdm)
return 0;
- if (!sd_group)
- return 0;
-
group_id = mlx5_sd_group_id(dev, sd_group);
if (!mlx5_sd_is_supported(dev, host_buses)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 5b7e6f4b5c7e..220a9ac75c8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -454,8 +454,8 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx)
static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
{
+ bool do_set = false, mem_page_fault = false;
void *set_hca_cap;
- bool do_set = false;
int err;
if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
@@ -470,6 +470,17 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ODP]->cur,
MLX5_ST_SZ_BYTES(odp_cap));
+ /* For best performance, enable memory scheme ODP only when
+ * it has page prefetch enabled.
+ */
+ if (MLX5_CAP_ODP_MAX(dev, mem_page_fault) &&
+ MLX5_CAP_ODP_MAX(dev, memory_page_fault_scheme_cap.page_prefetch)) {
+ mem_page_fault = true;
+ do_set = true;
+ MLX5_SET(odp_cap, set_hca_cap, mem_page_fault, mem_page_fault);
+ goto set;
+ }
+
#define ODP_CAP_SET_MAX(dev, field) \
do { \
u32 _res = MLX5_CAP_ODP_MAX(dev, field); \
@@ -479,25 +490,28 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
} \
} while (0)
- ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);
-
- if (!do_set)
- return 0;
-
- return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.ud_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.rc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.send);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.write);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.read);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.atomic);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.send);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.write);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.read);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.atomic);
+
+set:
+ if (do_set)
+ err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
+
+ mlx5_core_dbg(dev, "Using ODP %s scheme\n",
+ mem_page_fault ? "memory" : "transport");
+ return err;
}
static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
@@ -619,6 +633,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_with_driver_unload))
MLX5_SET(cmd_hca_cap, set_hca_cap,
pci_sync_for_fw_update_with_driver_unload, 1);
+ if (MLX5_CAP_GEN_MAX(dev, pcie_reset_using_hotreset_method))
+ MLX5_SET(cmd_hca_cap, set_hca_cap,
+ pcie_reset_using_hotreset_method, 1);
if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
MLX5_SET(cmd_hca_cap,
@@ -923,6 +940,11 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
}
mlx5_pci_vsc_init(dev);
+
+ err = pci_enable_ptm(pdev, NULL);
+ if (err)
+ mlx5_core_info(dev, "PTM is not supported by PCIe\n");
+
return 0;
err_clr_master:
@@ -939,6 +961,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev)
* before removing the pci bars
*/
mlx5_drain_health_wq(dev);
+ pci_disable_ptm(dev->pdev);
iounmap(dev->iseg);
release_bar(dev->pdev);
mlx5_pci_disable_device(dev);
@@ -2217,6 +2240,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
{ PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
{ PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */
+ { PCI_VDEVICE(MELLANOX, 0x1025) }, /* ConnectX-9 */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index d894a88fa9f2..972e8e9df585 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -608,6 +608,11 @@ enum {
RELEASE_ALL_PAGES_MASK = 0x4000,
};
+/* This limit is based on the capability of the firmware, as it cannot release
+ * more than 50000 pages back to the host in one go.
+ */
+#define MAX_RECLAIM_NPAGES (-50000)
+
static int req_pages_handler(struct notifier_block *nb,
unsigned long type, void *data)
{
@@ -639,7 +644,16 @@ static int req_pages_handler(struct notifier_block *nb,
req->dev = dev;
req->func_id = func_id;
- req->npages = npages;
+
+ /* npages > 0 means HCA asking host to allocate/give pages,
+ * npages < 0 means HCA asking host to reclaim back the pages allocated.
+ * Here we are restricting the maximum number of pages that can be
+ * reclaimed to be MAX_RECLAIM_NPAGES. Note that MAX_RECLAIM_NPAGES is
+ * a negative value.
+	 * Since MAX_RECLAIM_NPAGES is negative, we are using max() to restrict
+	 * req->npages (and not min()).
+ */
+ req->npages = max_t(s32, npages, MAX_RECLAIM_NPAGES);
req->ec_function = ec_function;
req->release_all = release_all;
INIT_WORK(&req->work, pages_work_handler);
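
Because MAX_RECLAIM_NPAGES is negative, max_t() is what caps the magnitude of a reclaim request while leaving allocation requests (positive npages) untouched. A standalone check of that logic, using a local stand-in for the kernel's max_t():

/* Illustration only: max_s32() mirrors max_t(s32, ...) from the hunk above. */
#include <assert.h>

#define MAX_RECLAIM_NPAGES (-50000)
#define max_s32(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* positive give-pages request passes through untouched */
	assert(max_s32(4096, MAX_RECLAIM_NPAGES) == 4096);
	/* small reclaim (within the limit) passes through */
	assert(max_s32(-100, MAX_RECLAIM_NPAGES) == -100);
	/* oversized reclaim is clamped to 50000 pages per event */
	assert(max_s32(-80000, MAX_RECLAIM_NPAGES) == MAX_RECLAIM_NPAGES);
	return 0;
}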
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
index 8bce730b5c5b..db2bd3ad63ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
@@ -28,6 +28,9 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
{
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP))
+ return -EOPNOTSUPP;
+
MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP);
@@ -44,6 +47,10 @@ int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
void *attr;
+ if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_TSAR) ||
+ !(MLX5_CAP_QOS(mdev, nic_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
+ return -EOPNOTSUPP;
+
MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 8c2a34a0d6be..baefb9a3fa05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -251,9 +251,9 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level);
output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
- flow_table_context.sw_owner_icm_root_1);
+ flow_table_context.sws.sw_owner_icm_root_1);
output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
- flow_table_context.sw_owner_icm_root_0);
+ flow_table_context.sws.sw_owner_icm_root_0);
return 0;
}
@@ -480,15 +480,15 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
*/
if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, attr->icm_addr_rx);
+ sws.sw_owner_icm_root_0, attr->icm_addr_rx);
} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, attr->icm_addr_tx);
+ sws.sw_owner_icm_root_0, attr->icm_addr_tx);
} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, attr->icm_addr_rx);
+ sws.sw_owner_icm_root_0, attr->icm_addr_rx);
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_1, attr->icm_addr_tx);
+ sws.sw_owner_icm_root_1, attr->icm_addr_tx);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 50c2554c9ccf..833cb68c744f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -9,14 +9,6 @@
#include "fs_dr.h"
#include "dr_types.h"
-static bool dr_is_fw_term_table(struct mlx5_flow_table *ft)
-{
- if (ft->flags & MLX5_FLOW_TABLE_TERMINATION)
- return true;
-
- return false;
-}
-
static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 underlay_qpn,
@@ -70,7 +62,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
u32 flags;
int err;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
ft_attr,
next_ft);
@@ -110,7 +102,7 @@ static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
int err;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
@@ -135,7 +127,7 @@ static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
return set_miss_action(ns, ft, next_ft);
@@ -154,7 +146,7 @@ static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
match_criteria_enable);
struct mlx5dr_match_parameters mask;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
fg);
@@ -179,7 +171,7 @@ static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
@@ -279,7 +271,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
int err = 0;
int i;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
@@ -306,12 +298,12 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
match_sz = sizeof(fte->val);
/* Drop reformat action bit if destination vport set with reformat */
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
if (!contain_vport_reformat_action(dst))
continue;
- fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
break;
}
}
@@ -321,7 +313,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
* TX: modify header -> push vlan -> encap
* RX: decap -> pop vlan -> modify header
*/
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
enum mlx5dr_action_reformat_type decap_type =
DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
@@ -337,26 +329,26 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
bool is_decap;
- if (fte->action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
+ if (fte->act_dests.action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
err = -EINVAL;
mlx5dr_err(domain, "FW-owned reformat can't be used in SW rule\n");
goto free_actions;
}
- is_decap = fte->action.pkt_reformat->reformat_type ==
+ is_decap = fte->act_dests.action.pkt_reformat->reformat_type ==
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
if (is_decap)
actions[num_actions++] =
- fte->action.pkt_reformat->action.dr_action;
+ fte->act_dests.action.pkt_reformat->action.dr_action;
else
delay_encap_set = true;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
tmp_action =
mlx5dr_action_create_pop_vlan();
if (!tmp_action) {
@@ -367,7 +359,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
tmp_action =
mlx5dr_action_create_pop_vlan();
if (!tmp_action) {
@@ -378,12 +370,12 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
actions[num_actions++] =
- fte->action.modify_hdr->action.dr_action;
+ fte->act_dests.action.modify_hdr->action.dr_action;
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
- tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+ tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[0]);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@@ -392,8 +384,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
- tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[1]);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@@ -404,11 +396,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
if (delay_encap_set)
actions[num_actions++] =
- fte->action.pkt_reformat->action.dr_action;
+ fte->act_dests.action.pkt_reformat->action.dr_action;
/* The order of the actions below is not important */
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
tmp_action = mlx5dr_action_create_drop();
if (!tmp_action) {
err = -ENOMEM;
@@ -418,9 +410,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
term_actions[num_term_actions++].dest = tmp_action;
}
- if (fte->flow_context.flow_tag) {
+ if (fte->act_dests.flow_context.flow_tag) {
tmp_action =
- mlx5dr_action_create_tag(fte->flow_context.flow_tag);
+ mlx5dr_action_create_tag(fte->act_dests.flow_context.flow_tag);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@@ -429,7 +421,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
enum mlx5_flow_destination_type type = dst->dest_attr.type;
u32 id;
@@ -510,7 +502,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
list_for_each_entry(dst, &fte->node.children, node.list) {
u32 id;
@@ -537,19 +529,21 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
- if (fte->action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ struct mlx5_flow_act *action = &fte->act_dests.action;
+
+ if (fte->act_dests.action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
err = -EOPNOTSUPP;
goto free_actions;
}
tmp_action =
mlx5dr_action_create_aso(domain,
- fte->action.exe_aso.object_id,
- fte->action.exe_aso.return_reg_id,
- fte->action.exe_aso.type,
- fte->action.exe_aso.flow_meter.init_color,
- fte->action.exe_aso.flow_meter.meter_idx);
+ action->exe_aso.object_id,
+ action->exe_aso.return_reg_id,
+ action->exe_aso.type,
+ action->exe_aso.flow_meter.init_color,
+ action->exe_aso.flow_meter.meter_idx);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@@ -576,8 +570,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = term_actions->dest;
} else if (num_term_actions > 1) {
bool ignore_flow_level =
- !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
- u32 flow_source = fte->flow_context.flow_source;
+ !!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+ u32 flow_source = fte->act_dests.flow_context.flow_source;
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
@@ -601,7 +595,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
&params,
num_actions,
actions,
- fte->flow_context.flow_source);
+ fte->act_dests.flow_context.flow_source);
if (!rule) {
err = -EINVAL;
goto free_actions;
@@ -740,7 +734,7 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
int err;
int i;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
err = mlx5dr_rule_destroy(rule->dr_rule);
@@ -765,7 +759,7 @@ static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
struct fs_fte fte_tmp = {};
int ret;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte);
/* Backup current dr rule details */
@@ -819,11 +813,11 @@ static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
enum fs_flow_table_type ft_type)
{
- u32 steering_caps = 0;
+ u32 steering_caps = MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH;
if (ft_type != FS_FT_FDB ||
MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5)
- return 0;
+ return steering_caps;
steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX;
steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile
new file mode 100644
index 000000000000..c78512eed8d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+subdir-ccflags-y += -I$(src)/..
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
new file mode 100644
index 000000000000..f39d636ff39a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -0,0 +1,926 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_H_
+#define MLX5HWS_H_
+
+struct mlx5hws_context;
+struct mlx5hws_table;
+struct mlx5hws_matcher;
+struct mlx5hws_rule;
+
+enum mlx5hws_table_type {
+ MLX5HWS_TABLE_TYPE_FDB,
+ MLX5HWS_TABLE_TYPE_MAX,
+};
+
+enum mlx5hws_matcher_resource_mode {
+ /* Allocate resources based on number of rules with minimal failure probability */
+ MLX5HWS_MATCHER_RESOURCE_MODE_RULE,
+ /* Allocate fixed size hash table based on given column and rows */
+ MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE,
+};
+
+enum mlx5hws_action_type {
+ MLX5HWS_ACTION_TYP_LAST,
+ MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
+ MLX5HWS_ACTION_TYP_DROP,
+ MLX5HWS_ACTION_TYP_MISS,
+ MLX5HWS_ACTION_TYP_TBL,
+ MLX5HWS_ACTION_TYP_CTR,
+ MLX5HWS_ACTION_TYP_TAG,
+ MLX5HWS_ACTION_TYP_MODIFY_HDR,
+ MLX5HWS_ACTION_TYP_VPORT,
+ MLX5HWS_ACTION_TYP_POP_VLAN,
+ MLX5HWS_ACTION_TYP_PUSH_VLAN,
+ MLX5HWS_ACTION_TYP_ASO_METER,
+ MLX5HWS_ACTION_TYP_INSERT_HEADER,
+ MLX5HWS_ACTION_TYP_REMOVE_HEADER,
+ MLX5HWS_ACTION_TYP_RANGE,
+ MLX5HWS_ACTION_TYP_SAMPLER,
+ MLX5HWS_ACTION_TYP_DEST_ARRAY,
+ MLX5HWS_ACTION_TYP_MAX,
+};
+
+enum mlx5hws_action_flags {
+ MLX5HWS_ACTION_FLAG_HWS_FDB = 1 << 0,
+	/* Shared action can be used across multiple threads, since the
+ * data is written only once at the creation of the action.
+ */
+ MLX5HWS_ACTION_FLAG_SHARED = 1 << 1,
+};
+
+enum mlx5hws_action_aso_meter_color {
+ MLX5HWS_ACTION_ASO_METER_COLOR_RED = 0x0,
+ MLX5HWS_ACTION_ASO_METER_COLOR_YELLOW = 0x1,
+ MLX5HWS_ACTION_ASO_METER_COLOR_GREEN = 0x2,
+ MLX5HWS_ACTION_ASO_METER_COLOR_UNDEFINED = 0x3,
+};
+
+enum mlx5hws_send_queue_actions {
+ /* Start executing all pending queued rules */
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC = 1 << 0,
+ /* Start executing all pending queued rules wait till completion */
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC = 1 << 1,
+};
+
+struct mlx5hws_context_attr {
+ u16 queues;
+ u16 queue_size;
+	bool bwc; /* Add support for the backward-compatible API */
+};
+
+struct mlx5hws_table_attr {
+ enum mlx5hws_table_type type;
+ u32 level;
+};
+
+enum mlx5hws_matcher_flow_src {
+ MLX5HWS_MATCHER_FLOW_SRC_ANY = 0x0,
+ MLX5HWS_MATCHER_FLOW_SRC_WIRE = 0x1,
+ MLX5HWS_MATCHER_FLOW_SRC_VPORT = 0x2,
+};
+
+enum mlx5hws_matcher_insert_mode {
+ MLX5HWS_MATCHER_INSERT_BY_HASH = 0x0,
+ MLX5HWS_MATCHER_INSERT_BY_INDEX = 0x1,
+};
+
+enum mlx5hws_matcher_distribute_mode {
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH = 0x0,
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR = 0x1,
+};
+
+struct mlx5hws_matcher_attr {
+ /* Processing priority inside table */
+ u32 priority;
+ /* Provide all rules with unique rule_idx in num_log range to reduce locking */
+ bool optimize_using_rule_idx;
+ /* Resource mode and corresponding size */
+ enum mlx5hws_matcher_resource_mode mode;
+ /* Optimize insertion in case packet origin is the same for all rules */
+ enum mlx5hws_matcher_flow_src optimize_flow_src;
+ /* Define the insertion and distribution modes for this matcher */
+ enum mlx5hws_matcher_insert_mode insert_mode;
+ enum mlx5hws_matcher_distribute_mode distribute_mode;
+ /* Define whether the created matcher supports resizing into a bigger matcher */
+ bool resizable;
+ union {
+ struct {
+ u8 sz_row_log;
+ u8 sz_col_log;
+ } table;
+
+ struct {
+ u8 num_log;
+ } rule;
+ };
+ /* Optional AT attach configuration - Max number of additional AT */
+ u8 max_num_of_at_attach;
+};
+
+struct mlx5hws_rule_attr {
+ void *user_data;
+ /* Valid if matcher optimize_using_rule_idx is set or
+ * if matcher is configured to insert rules by index.
+ */
+ u32 rule_idx;
+ u32 flow_source;
+ u16 queue_id;
+ u32 burst:1;
+};
+
+/* In actions that take offset, the offset is unique, pointing to a single
+ * resource and the user should not reuse the same index because data changing
+ * is not atomic.
+ */
+struct mlx5hws_rule_action {
+ struct mlx5hws_action *action;
+ union {
+ struct {
+ u32 value;
+ } tag;
+
+ struct {
+ u32 offset;
+ } counter;
+
+ struct {
+ u32 offset;
+ u8 *data;
+ } modify_header;
+
+ struct {
+ u32 offset;
+ u8 hdr_idx;
+ u8 *data;
+ } reformat;
+
+ struct {
+ __be32 vlan_hdr;
+ } push_vlan;
+
+ struct {
+ u32 offset;
+ enum mlx5hws_action_aso_meter_color init_color;
+ } aso_meter;
+ };
+};
+
+struct mlx5hws_action_reformat_header {
+ size_t sz;
+ void *data;
+};
+
+struct mlx5hws_action_insert_header {
+ struct mlx5hws_action_reformat_header hdr;
+ /* PRM start anchor to which header will be inserted */
+ u8 anchor;
+ /* Header insertion offset in bytes, from the start
+ * anchor to the location where new header will be inserted.
+ */
+ u8 offset;
+ /* Indicates this header insertion adds encapsulation header to the packet,
+ * requiring device to update offloaded fields (for example IPv4 total length).
+ */
+ bool encap;
+};
+
+struct mlx5hws_action_remove_header_attr {
+ /* PRM start anchor from which header will be removed */
+ u8 anchor;
+ /* Header remove offset in bytes, from the start
+ * anchor to the location where remove header starts.
+ */
+ u8 offset;
+ /* Indicates the removed header size in bytes */
+ size_t size;
+};
+
+struct mlx5hws_action_mh_pattern {
+ /* Byte size of modify actions provided by "data" */
+ size_t sz;
+ /* PRM format modify actions pattern */
+ __be64 *data;
+};
+
+struct mlx5hws_action_dest_attr {
+ /* Required destination action to forward the packet */
+ struct mlx5hws_action *dest;
+ /* Optional reformat action */
+ struct mlx5hws_action *reformat;
+};
+
+/**
+ * mlx5hws_is_supported - Check whether HWS is supported
+ *
+ * @mdev: The device to check.
+ *
+ * Return: true if supported, false otherwise.
+ */
+static inline bool mlx5hws_is_supported(struct mlx5_core_dev *mdev)
+{
+ u8 ignore_flow_level_rtc_valid;
+ u8 wqe_based_flow_table_update;
+
+ wqe_based_flow_table_update =
+ MLX5_CAP_GEN(mdev, wqe_based_flow_table_update_cap);
+ ignore_flow_level_rtc_valid =
+ MLX5_CAP_FLOWTABLE(mdev,
+ flow_table_properties_nic_receive.ignore_flow_level_rtc_valid);
+
+ return wqe_based_flow_table_update && ignore_flow_level_rtc_valid;
+}
+
+/**
+ * mlx5hws_context_open - Open a context used for direct rule insertion
+ * using hardware steering.
+ *
+ * @mdev: The device to be used for HWS.
+ * @attr: Attributes used for context open.
+ *
+ * Return: pointer to mlx5hws_context on success NULL otherwise.
+ */
+struct mlx5hws_context *
+mlx5hws_context_open(struct mlx5_core_dev *mdev,
+ struct mlx5hws_context_attr *attr);
+
+/**
+ * mlx5hws_context_close - Close a context used for direct hardware steering.
+ *
+ * @ctx: mlx5hws context to close.
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_context_close(struct mlx5hws_context *ctx);
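
A hedged lifecycle sketch for the two calls above, wrapped in a hypothetical init helper; the queue sizing values are illustrative and error handling is abbreviated:

/* Sketch, assuming a probed mdev; attribute values are examples only. */
static int my_hws_init(struct mlx5_core_dev *mdev)
{
	struct mlx5hws_context_attr ctx_attr = {
		.queues = 4,		/* parallel rule-insertion queues */
		.queue_size = 256,	/* pending operations per queue */
	};
	struct mlx5hws_context *ctx;

	if (!mlx5hws_is_supported(mdev))
		return -EOPNOTSUPP;

	ctx = mlx5hws_context_open(mdev, &ctx_attr);
	if (!ctx)
		return -EINVAL;

	/* ... create tables, matchers and rules ... */

	return mlx5hws_context_close(ctx);
}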
+
+/**
+ * mlx5hws_context_set_peer - Set a peer context.
+ * Each context can have multiple contexts as peers.
+ *
+ * @ctx: The context in which the peer_ctx will be peered to it.
+ * @peer_ctx: The peer context.
+ * @peer_vhca_id: The peer context vhca id.
+ */
+void mlx5hws_context_set_peer(struct mlx5hws_context *ctx,
+ struct mlx5hws_context *peer_ctx,
+ u16 peer_vhca_id);
+
+/**
+ * mlx5hws_table_create - Create a new direct rule table.
+ * Each table can contain multiple matchers.
+ *
+ * @ctx: The context in which the new table will be opened.
+ * @attr: Attributes used for table creation.
+ *
+ * Return: pointer to mlx5hws_table on success NULL otherwise.
+ */
+struct mlx5hws_table *
+mlx5hws_table_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_table_attr *attr);
+
+/**
+ * mlx5hws_table_destroy - Destroy direct rule table.
+ *
+ * @tbl: Table to destroy.
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_table_destroy(struct mlx5hws_table *tbl);
+
+/**
+ * mlx5hws_table_get_id() - Get ID of the flow table.
+ *
+ * @tbl: Table to get ID of.
+ *
+ * Return: ID of the table.
+ */
+u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl);
+
+/**
+ * mlx5hws_table_set_default_miss - Set default miss table for mlx5hws_table
+ * by using another mlx5hws_table.
+ * Traffic that misses all of the table's matchers will be forwarded to the miss table.
+ *
+ * @tbl: Source table
+ * @miss_tbl: Target (miss) table, or NULL to remove current miss table
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
+ struct mlx5hws_table *miss_tbl);
+
+/**
+ * mlx5hws_match_template_create - Create a new match template based on items mask.
+ * The match template will be used for matcher creation.
+ *
+ * @ctx: The context in which the new template will be created.
+ * @match_param: Describe the mask based on PRM match parameters.
+ * @match_param_sz: Size of match param buffer.
+ * @match_criteria_enable: Bitmap for each sub-set in match_criteria buffer.
+ *
+ * Return: Pointer to mlx5hws_match_template on success, NULL otherwise.
+ */
+struct mlx5hws_match_template *
+mlx5hws_match_template_create(struct mlx5hws_context *ctx,
+ u32 *match_param,
+ u32 match_param_sz,
+ u8 match_criteria_enable);
+
+/**
+ * mlx5hws_match_template_destroy - Destroy a match template.
+ *
+ * @mt: Match template to destroy.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_match_template_destroy(struct mlx5hws_match_template *mt);
+
+/**
+ * mlx5hws_action_template_create - Create a new action template based on an action_type array.
+ *
+ * @action_type: An array of actions based on the order of actions which will be provided
+ * with rule_actions to mlx5hws_rule_create. The last action is marked
+ * using MLX5HWS_ACTION_TYP_LAST.
+ *
+ * Return: Pointer to mlx5hws_action_template on success, NULL otherwise.
+ */
+struct mlx5hws_action_template *
+mlx5hws_action_template_create(enum mlx5hws_action_type action_type[]);
+
+/**
+ * mlx5hws_action_template_destroy - Destroy action template.
+ *
+ * @at: Action template to destroy.
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_action_template_destroy(struct mlx5hws_action_template *at);
+
+/**
+ * mlx5hws_matcher_create - Create a new direct rule matcher.
+ *
+ * Each matcher can contain multiple rules. Matchers on the table will be
+ * processed by priority. Matching fields and mask are described by the
+ * match template. In some cases, multiple match templates can be used on
+ * the same matcher.
+ *
+ * @table: The table in which the new matcher will be opened.
+ * @mt: Array of match templates to be used on matcher.
+ * @num_of_mt: Number of match templates in mt array.
+ * @at: Array of action templates to be used on matcher.
+ * @num_of_at: Number of action templates in at array.
+ * @attr: Attributes used for matcher creation.
+ *
+ * Return: Pointer to mlx5hws_matcher on success, NULL otherwise.
+ *
+ */
+struct mlx5hws_matcher *
+mlx5hws_matcher_create(struct mlx5hws_table *table,
+ struct mlx5hws_match_template *mt[],
+ u8 num_of_mt,
+ struct mlx5hws_action_template *at[],
+ u8 num_of_at,
+ struct mlx5hws_matcher_attr *attr);
+
+/**
+ * mlx5hws_matcher_destroy - Destroy a direct rule matcher.
+ *
+ * @matcher: Matcher to destroy.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_matcher_destroy(struct mlx5hws_matcher *matcher);
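
A hedged sketch tying table, template and matcher creation together; the match buffer, criteria bits and sizing are assumptions, and error handling is omitted for brevity:

/* Sketch: ctx, match_param, match_param_sz, match_criteria_enable and
 * action_types (terminated by MLX5HWS_ACTION_TYP_LAST) are assumed to exist.
 */
struct mlx5hws_table_attr tbl_attr = {
	.type = MLX5HWS_TABLE_TYPE_FDB,
	.level = 0,
};
struct mlx5hws_matcher_attr m_attr = {
	.priority = 0,
	.mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE,
	.rule = { .num_log = 12 },	/* room for roughly 4K rules */
};
struct mlx5hws_match_template *mt;
struct mlx5hws_action_template *at;
struct mlx5hws_matcher *matcher;
struct mlx5hws_table *tbl;

tbl = mlx5hws_table_create(ctx, &tbl_attr);
mt = mlx5hws_match_template_create(ctx, match_param, match_param_sz,
				   match_criteria_enable);
at = mlx5hws_action_template_create(action_types);
matcher = mlx5hws_matcher_create(tbl, &mt, 1, &at, 1, &m_attr);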
+
+/**
+ * mlx5hws_matcher_attach_at - Attach a new action template to a direct rule matcher.
+ *
+ * @matcher: Matcher to attach the action template to.
+ * @at: Action template to be attached to the matcher.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_action_template *at);
+
+/**
+ * mlx5hws_matcher_resize_set_target - Link two matchers and enable moving rules.
+ *
+ * Both matchers must be in the same table type, must be created with the
+ * 'resizable' property, and should have the same characteristics (e.g., same
+ * match templates and action templates). It is the user's responsibility to
+ * ensure that the destination matcher is allocated with the appropriate size.
+ *
+ * Once the function is completed, the user is:
+ * - Allowed to move rules from the source into the destination matcher.
+ * - No longer allowed to insert rules into the source matcher.
+ *
+ * The user is always allowed to insert rules into the destination matcher and
+ * to delete rules from any matcher.
+ *
+ * @src_matcher: Source matcher for moving rules from.
+ * @dst_matcher: Destination matcher for moving rules to.
+ *
+ * Return: Zero on successful move, non-zero otherwise.
+ */
+int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_matcher *dst_matcher);
+
+/**
+ * mlx5hws_matcher_resize_rule_move - Enqueue moving rule operation.
+ *
+ * This function enqueues the operation of moving a rule from the source
+ * matcher to the destination matcher.
+ *
+ * @src_matcher: Matcher that the rule belongs to.
+ * @rule: The rule to move.
+ * @attr: Rule attributes.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_matcher_resize_rule_move(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr);
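
The resize protocol described in the two kernel-docs above, as a hedged sketch: link the matchers, enqueue a move per rule, then retire the source. The rule bookkeeping (my_rules, nr_rules) is the caller's and is an assumption here:

/* Sketch: my_rules[]/nr_rules track the caller's rule handles. */
int i, err;

err = mlx5hws_matcher_resize_set_target(src_matcher, dst_matcher);
if (err)
	return err;

for (i = 0; i < nr_rules; i++)
	mlx5hws_matcher_resize_rule_move(src_matcher, my_rules[i], &rule_attr);

/* after draining the queue and confirming completions: */
mlx5hws_matcher_destroy(src_matcher);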
+
+/**
+ * mlx5hws_rule_create - Enqueue create rule operation.
+ *
+ * @matcher: The matcher in which the new rule will be created.
+ * @mt_idx: Match template index to create the match with.
+ * @match_param: The match parameter PRM buffer used for value matching.
+ * @at_idx: Action template index to apply the actions with.
+ * @rule_actions: Rule actions to be executed on match.
+ * @attr: Rule creation attributes.
+ * @rule_handle: A valid rule handle. The handle doesn't require any initialization.
+ *
+ * Return: Zero on successful enqueue, non-zero otherwise.
+ */
+int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
+ u8 mt_idx,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr,
+ struct mlx5hws_rule *rule_handle);
+
+/**
+ * mlx5hws_rule_destroy - Enqueue destroy rule operation.
+ *
+ * @rule: The rule destruction to enqueue.
+ * @attr: Rule destruction attributes.
+ *
+ * Return: Zero on successful enqueue, non-zero otherwise.
+ */
+int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr);
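
Rule insertion is asynchronous: mlx5hws_rule_create() only enqueues work, and the completion must be reaped from the same queue (mlx5hws_send_queue_poll() is declared further down in this header). A hedged sketch; matcher, match_param, rule_actions and the rule handle's storage are assumed:

/* Sketch of enqueue-then-poll rule insertion. */
struct mlx5hws_rule_attr rule_attr = {
	.queue_id = 0,
	.user_data = &my_cookie,	/* echoed back in the completion */
	.burst = 0,			/* ring the doorbell immediately */
};
struct mlx5hws_flow_op_result res[1];
int polled, err;

err = mlx5hws_rule_create(matcher, 0, match_param, 0, rule_actions,
			  &rule_attr, rule_handle);
if (err)
	return err;

do {
	polled = mlx5hws_send_queue_poll(ctx, rule_attr.queue_id, res, 1);
} while (polled == 0);

if (polled < 0 || res[0].status != MLX5HWS_FLOW_OP_SUCCESS)
	return -EINVAL;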
+
+/**
+ * mlx5hws_rule_action_update - Enqueue update actions on an existing rule.
+ *
+ * @rule: A valid rule handle to update.
+ * @at_idx: Action template index to update the actions with.
+ * @rule_actions: Rule actions to be executed on match.
+ * @attr: Rule update attributes.
+ *
+ * Return: Zero on successful enqueue, non-zero otherwise.
+ */
+int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr);
+
+/**
+ * mlx5hws_action_get_type - Get action type.
+ *
+ * @action: The action to get the type of.
+ *
+ * Return: action type.
+ */
+enum mlx5hws_action_type
+mlx5hws_action_get_type(struct mlx5hws_action *action);
+
+/**
+ * mlx5hws_action_create_dest_drop - Create a direct rule drop action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: Pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_drop(struct mlx5hws_context *ctx,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_default_miss - Create a direct rule default miss action.
+ * Defaults are RX: Drop, TX: Wire.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: Pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_default_miss(struct mlx5hws_context *ctx,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_table - Create direct rule goto table action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @tbl: Destination table.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_table *tbl,
+ u32 flags);
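
A hedged sketch of building a rule_actions array around the goto-table action above; pairing it with a LAST terminator and the FDB flag is an assumption consistent with this header:

/* Sketch: ctx and next_tbl exist; slot order must match the action template. */
struct mlx5hws_action *to_tbl, *last;
struct mlx5hws_rule_action ractions[2] = {};

to_tbl = mlx5hws_action_create_dest_table(ctx, next_tbl,
					  MLX5HWS_ACTION_FLAG_HWS_FDB);
last = mlx5hws_action_create_last(ctx, MLX5HWS_ACTION_FLAG_HWS_FDB);

ractions[0].action = to_tbl;	/* forward on match */
ractions[1].action = last;	/* MLX5HWS_ACTION_TYP_LAST terminator */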
+
+/**
+ * mlx5hws_action_create_dest_table_num - Create direct rule goto table number action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @tbl_num: Destination table number.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table_num(struct mlx5hws_context *ctx,
+ u32 tbl_num, u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_match_range - Create direct rule range match action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @field: Field to compare the value against.
+ * @hit_ft: Flow table to go to on hit.
+ * @miss_ft: Flow table to go to on miss.
+ * @min: Minimal value of the field to be considered as hit.
+ * @max: Maximal value of the field to be considered as hit.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
+ u32 field,
+ struct mlx5_flow_table *hit_ft,
+ struct mlx5_flow_table *miss_ft,
+ u32 min, u32 max, u32 flags);
+
+/**
+ * mlx5hws_action_create_flow_sampler - Create direct rule flow sampler action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @sampler_id: Flow sampler object ID.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
+ u32 sampler_id, u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_vport - Create direct rule goto vport action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @vport_num: Destination vport number.
+ * @vhca_id_valid: Tells if the vhca_id parameter is valid.
+ * @vhca_id: VHCA ID of the destination vport.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx,
+ u16 vport_num,
+ bool vhca_id_valid,
+ u16 vhca_id,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_tag - Create direct rule TAG action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_tag(struct mlx5hws_context *ctx, u32 flags);
+
+/**
+ * mlx5hws_action_create_counter - Create direct rule counter action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @obj_id: Direct rule counter object ID.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_counter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_reformat - Create direct rule reformat action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @reformat_type: Type of reformat prefixed with MLX5HWS_ACTION_TYP_REFORMAT.
+ * @num_of_hdrs: Number of provided headers in "hdrs" array.
+ * @hdrs: Headers array containing header information.
+ * @log_bulk_size: Number of unique values used with this reformat.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_reformat(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type reformat_type,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_modify_header - Create direct rule modify header action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @num_of_patterns: Number of provided patterns in "patterns" array.
+ * @patterns: Patterns array containing pattern information.
+ * @log_bulk_size: Number of unique values used with this pattern.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx,
+ u8 num_of_patterns,
+ struct mlx5hws_action_mh_pattern *patterns,
+ u32 log_bulk_size,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_aso_meter - Create direct rule ASO flow meter action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @obj_id: ASO object ID.
+ * @return_reg_c: Copy the ASO object value into this reg_c,
+ * after a packet hits a rule with this ASO object.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u8 return_reg_c,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_pop_vlan - Create direct rule pop vlan action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_pop_vlan(struct mlx5hws_context *ctx, u32 flags);
+
+/**
+ * mlx5hws_action_create_push_vlan - Create direct rule push vlan action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_array - Create a dest array action, this action can
+ * duplicate packets and forward to multiple destinations in the destination list.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @num_dest: The number of dests attributes.
+ * @dests: The destination array. Each contains a destination action and can
+ * have additional actions.
+ * @ignore_flow_level: Whether to turn on 'ignore_flow_level' for this dest.
+ * @flow_source: Source port of the traffic for this action.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
+ size_t num_dest,
+ struct mlx5hws_action_dest_attr *dests,
+ bool ignore_flow_level,
+ u32 flow_source,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_insert_header - Create insert header action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @num_of_hdrs: Number of provided headers in "hdrs" array.
+ * @hdrs: Headers array containing header information.
+ * @log_bulk_size: Number of unique values used with this insert header.
+ * @flags: Action creation flags. (enum mlx5hws_action_flags)
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_insert_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_remove_header - Create remove header action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @attr: Attributes that specify the remove header type, the PRM start anchor
+ *        and the PRM end anchor, or the PRM start anchor and remove size in bytes.
+ * @flags: Action creation flags. (enum mlx5hws_action_flags)
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_remove_header(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_remove_header_attr *attr,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_last - Create direct rule LAST action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags. (enum mlx5hws_action_flags)
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_last(struct mlx5hws_context *ctx, u32 flags);
+
+/**
+ * mlx5hws_action_destroy - Destroy direct rule action.
+ *
+ * @action: The action to destroy.
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_action_destroy(struct mlx5hws_action *action);
+
+enum mlx5hws_flow_op_status {
+ MLX5HWS_FLOW_OP_SUCCESS,
+ MLX5HWS_FLOW_OP_ERROR,
+};
+
+struct mlx5hws_flow_op_result {
+ enum mlx5hws_flow_op_status status;
+ void *user_data;
+};
+
+/**
+ * mlx5hws_send_queue_poll - Poll a queue for rule creation and deletion completions.
+ *
+ * @ctx: The context to which the queue belongs.
+ * @queue_id: The id of the queue to poll.
+ * @res: Completion array.
+ * @res_nb: Maximum number of results to return.
+ *
+ * Return: negative number on failure, the number of completions otherwise.
+ */
+int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ struct mlx5hws_flow_op_result res[],
+ u32 res_nb);
+
+/**
+ * mlx5hws_send_queue_action - Perform an action on the queue
+ *
+ * @ctx: The context to which the queue belongs.
+ * @queue_id: The id of the queue to perform the action on.
+ * @actions: Actions to perform on the queue (enum mlx5hws_send_queue_actions)
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 actions);
+
+/**
+ * mlx5hws_debug_dump - Dump HWS info
+ *
+ * @ctx: The context from which to dump the info.
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_debug_dump(struct mlx5hws_context *ctx);
+
+struct mlx5hws_bwc_matcher;
+struct mlx5hws_bwc_rule;
+
+struct mlx5hws_match_parameters {
+ size_t match_sz;
+ u32 *match_buf; /* Device spec format */
+};
+
+/**
+ * mlx5hws_bwc_matcher_create - Create a new BWC direct rule matcher.
+ *
+ * This function does the following:
+ * - creates match template based on flow items
+ * - creates an empty action template
+ * - creates a regular mlx5hws_matcher with these mt and at, setting
+ * its size to minimal
+ * Notes:
+ * - table->ctx must have BWC support
+ * - complex rules are not supported
+ *
+ * @table: The table in which the new matcher will be opened
+ * @priority: Priority for this BWC matcher
+ * @match_criteria_enable: Bitmask that defines matching criteria
+ * @mask: Match parameters
+ *
+ * Return: pointer to mlx5hws_bwc_matcher on success or NULL otherwise.
+ */
+struct mlx5hws_bwc_matcher *
+mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask);
+
+/**
+ * mlx5hws_bwc_matcher_destroy - Destroy BWC direct rule matcher.
+ *
+ * @bwc_matcher: Matcher to destroy
+ *
+ * Return: zero on success, non zero otherwise
+ */
+int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+/**
+ * mlx5hws_bwc_rule_create - Create a new BWC rule.
+ *
+ * Unlike the usual rule creation function, this one is blocking: when the
+ * function returns, the rule is written to its place (no need to poll).
+ * This function does the following:
+ * - finds matching action template based on the provided rule_actions, or
+ * creates new action template if matching action template doesn't exist
+ * - updates corresponding BWC matcher stats
+ * - if needed, the function performs rehash:
+ * - creates a new matcher based on mt, at, new_sz
+ * - moves all the existing matcher rules to the new matcher
+ * - removes the old matcher
+ * - inserts new rule
+ * - polls until completion is received
+ * Notes:
+ * - matcher->tbl->ctx must have BWC support
+ * - separate BWC ctx queues are used
+ *
+ * @bwc_matcher: The BWC matcher in which the new rule will be created.
+ * @params: Match parameters
+ * @flow_source: Flow source for this rule
+ * @rule_actions: Rule actions to be executed on match
+ *
+ * Return: valid BWC rule handle on success, NULL otherwise
+ */
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[]);
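+
+/*
+ * Illustrative usage sketch (an assumption, not part of this patch): insert a
+ * BWC rule with a counter and a destination table. The action handles are
+ * hypothetical; the array is terminated by an action created with
+ * mlx5hws_action_create_last():
+ *
+ *	struct mlx5hws_rule_action rule_actions[3] = {};
+ *
+ *	rule_actions[0].action = ctr_action;
+ *	rule_actions[1].action = dest_tbl_action;
+ *	rule_actions[2].action = last_action;
+ *	bwc_rule = mlx5hws_bwc_rule_create(bwc_matcher, &params, 0,
+ *					   rule_actions);
+ */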
+
+/**
+ * mlx5hws_bwc_rule_destroy - Destroy BWC direct rule.
+ *
+ * @bwc_rule: Rule to destroy.
+ *
+ * Return: zero on success, non-zero otherwise.
+ */
+int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule);
+
+/**
+ * mlx5hws_bwc_rule_action_update - Update actions on an existing BWC rule.
+ *
+ * @bwc_rule: Rule to update
+ * @rule_actions: Rule actions to update with
+ *
+ * Return: zero on successful update, non-zero otherwise.
+ */
+int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[]);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c
new file mode 100644
index 000000000000..b27bb4106532
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c
@@ -0,0 +1,2604 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+#define MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET 1
+
+/* Header removal size limited to 128B (64 words) */
+#define MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE 128
+
+/* This is the longest supported action sequence for the FDB table:
+ * DECAP, POP_VLAN, MODIFY, CTR, ASO, PUSH_VLAN, MODIFY, ENCAP, Term.
+ */
+static const u32 action_order_arr[MLX5HWS_TABLE_TYPE_MAX][MLX5HWS_ACTION_TYP_MAX] = {
+ [MLX5HWS_TABLE_TYPE_FDB] = {
+ BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
+ BIT(MLX5HWS_ACTION_TYP_CTR),
+ BIT(MLX5HWS_ACTION_TYP_TAG),
+ BIT(MLX5HWS_ACTION_TYP_ASO_METER),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_TBL) |
+ BIT(MLX5HWS_ACTION_TYP_VPORT) |
+ BIT(MLX5HWS_ACTION_TYP_DROP) |
+ BIT(MLX5HWS_ACTION_TYP_SAMPLER) |
+ BIT(MLX5HWS_ACTION_TYP_RANGE) |
+ BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY),
+ BIT(MLX5HWS_ACTION_TYP_LAST),
+ },
+};
+
+static const char * const mlx5hws_action_type_str[] = {
+ [MLX5HWS_ACTION_TYP_LAST] = "LAST",
+ [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2] = "TNL_L2_TO_L2",
+ [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2] = "L2_TO_TNL_L2",
+ [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2] = "TNL_L3_TO_L2",
+ [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3] = "L2_TO_TNL_L3",
+ [MLX5HWS_ACTION_TYP_DROP] = "DROP",
+ [MLX5HWS_ACTION_TYP_TBL] = "TBL",
+ [MLX5HWS_ACTION_TYP_CTR] = "CTR",
+ [MLX5HWS_ACTION_TYP_TAG] = "TAG",
+ [MLX5HWS_ACTION_TYP_MODIFY_HDR] = "MODIFY_HDR",
+ [MLX5HWS_ACTION_TYP_VPORT] = "VPORT",
+ [MLX5HWS_ACTION_TYP_MISS] = "DEFAULT_MISS",
+ [MLX5HWS_ACTION_TYP_POP_VLAN] = "POP_VLAN",
+ [MLX5HWS_ACTION_TYP_PUSH_VLAN] = "PUSH_VLAN",
+ [MLX5HWS_ACTION_TYP_ASO_METER] = "ASO_METER",
+ [MLX5HWS_ACTION_TYP_DEST_ARRAY] = "DEST_ARRAY",
+ [MLX5HWS_ACTION_TYP_INSERT_HEADER] = "INSERT_HEADER",
+ [MLX5HWS_ACTION_TYP_REMOVE_HEADER] = "REMOVE_HEADER",
+ [MLX5HWS_ACTION_TYP_SAMPLER] = "SAMPLER",
+ [MLX5HWS_ACTION_TYP_RANGE] = "RANGE",
+};
+
+static_assert(ARRAY_SIZE(mlx5hws_action_type_str) == MLX5HWS_ACTION_TYP_MAX,
+ "Missing mlx5hws_action_type_str");
+
+const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type)
+{
+ return mlx5hws_action_type_str[action_type];
+}
+
+enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action)
+{
+ return action->type;
+}
+
+static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
+ enum mlx5hws_context_shared_stc_type stc_type,
+ u8 tbl_type)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_action_shared_stc *shared_stc;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+ if (ctx->common_res[tbl_type].shared_stc[stc_type]) {
+ ctx->common_res[tbl_type].shared_stc[stc_type]->refcount++;
+ mutex_unlock(&ctx->ctrl_lock);
+ return 0;
+ }
+
+ shared_stc = kzalloc(sizeof(*shared_stc), GFP_KERNEL);
+ if (!shared_stc) {
+ ret = -ENOMEM;
+ goto unlock_and_out;
+ }
+ switch (stc_type) {
+ case MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3:
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.remove_header.decap = 0;
+ stc_attr.remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ stc_attr.remove_header.end_anchor = MLX5_HEADER_ANCHOR_IPV6_IPV4;
+ break;
+ case MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP:
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ stc_attr.remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;
+ stc_attr.remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN;
+ break;
+ default:
+ mlx5hws_err(ctx, "No such stc_type: %d\n", stc_type);
+ pr_warn("HWS: Invalid stc_type: %d\n", stc_type);
+ ret = -EINVAL;
+ goto unlock_and_out;
+ }
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &shared_stc->stc_chunk);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate shared decap l2 STC\n");
+ goto free_shared_stc;
+ }
+
+ ctx->common_res[tbl_type].shared_stc[stc_type] = shared_stc;
+ ctx->common_res[tbl_type].shared_stc[stc_type]->refcount = 1;
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+free_shared_stc:
+ kfree(shared_stc);
+unlock_and_out:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static int hws_action_get_shared_stc(struct mlx5hws_action *action,
+ enum mlx5hws_context_shared_stc_type stc_type)
+{
+ struct mlx5hws_context *ctx = action->ctx;
+ int ret;
+
+ if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) {
+ pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type);
+ return -EINVAL;
+ }
+
+ if (unlikely(!(action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB))) {
+ pr_warn("HWS: Invalid action->flags: %d\n", action->flags);
+ return -EINVAL;
+ }
+
+ ret = hws_action_get_shared_stc_nic(ctx, stc_type, MLX5HWS_TABLE_TYPE_FDB);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed to allocate memory for FDB shared STCs (type: %d)\n",
+ stc_type);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hws_action_put_shared_stc(struct mlx5hws_action *action,
+ enum mlx5hws_context_shared_stc_type stc_type)
+{
+ enum mlx5hws_table_type tbl_type = MLX5HWS_TABLE_TYPE_FDB;
+ struct mlx5hws_action_shared_stc *shared_stc;
+ struct mlx5hws_context *ctx = action->ctx;
+
+ if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) {
+ pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type);
+ return;
+ }
+
+ mutex_lock(&ctx->ctrl_lock);
+ if (--ctx->common_res[tbl_type].shared_stc[stc_type]->refcount) {
+ mutex_unlock(&ctx->ctrl_lock);
+ return;
+ }
+
+ shared_stc = ctx->common_res[tbl_type].shared_stc[stc_type];
+
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &shared_stc->stc_chunk);
+ kfree(shared_stc);
+ ctx->common_res[tbl_type].shared_stc[stc_type] = NULL;
+ mutex_unlock(&ctx->ctrl_lock);
+}
+
+static void hws_action_print_combo(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type *user_actions)
+{
+ mlx5hws_err(ctx, "Invalid action_type sequence");
+ while (*user_actions != MLX5HWS_ACTION_TYP_LAST) {
+ mlx5hws_err(ctx, " %s", mlx5hws_action_type_to_str(*user_actions));
+ user_actions++;
+ }
+ mlx5hws_err(ctx, "\n");
+}
+
+bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type *user_actions,
+ enum mlx5hws_table_type table_type)
+{
+ const u32 *order_arr = action_order_arr[table_type];
+ u8 order_idx = 0;
+ u8 user_idx = 0;
+ bool valid_combo;
+
+ if (table_type >= MLX5HWS_TABLE_TYPE_MAX) {
+ mlx5hws_err(ctx, "Invalid table_type %d", table_type);
+ return false;
+ }
+
+ while (order_arr[order_idx] != BIT(MLX5HWS_ACTION_TYP_LAST)) {
+ /* User action order validated, move to the next user action */
+ if (BIT(user_actions[user_idx]) & order_arr[order_idx])
+ user_idx++;
+
+ /* Iterate to the next supported action in the order */
+ order_idx++;
+ }
+
+ /* Combination is valid if all user actions were processed */
+ valid_combo = user_actions[user_idx] == MLX5HWS_ACTION_TYP_LAST;
+ if (!valid_combo)
+ hws_action_print_combo(ctx, user_actions);
+
+ return valid_combo;
+}
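+
+/*
+ * Illustrative check (an assumption, not part of this patch): per the FDB
+ * order array above, a counter followed by a modify header and a table
+ * destination is a valid sequence:
+ *
+ *	enum mlx5hws_action_type seq[] = {
+ *		MLX5HWS_ACTION_TYP_CTR,
+ *		MLX5HWS_ACTION_TYP_MODIFY_HDR,
+ *		MLX5HWS_ACTION_TYP_TBL,
+ *		MLX5HWS_ACTION_TYP_LAST,
+ *	};
+ *
+ *	valid = mlx5hws_action_check_combo(ctx, seq, MLX5HWS_TABLE_TYPE_FDB);
+ */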
+
+static bool
+hws_action_fixup_stc_attr(struct mlx5hws_context *ctx,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ struct mlx5hws_cmd_stc_modify_attr *fixup_stc_attr,
+ enum mlx5hws_table_type table_type,
+ bool is_mirror)
+{
+ bool use_fixup = false;
+ u32 fw_tbl_type;
+ u32 base_id;
+
+ fw_tbl_type = mlx5hws_table_get_res_fw_ft_type(table_type, is_mirror);
+
+ switch (stc_attr->action_type) {
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
+ if (is_mirror && stc_attr->ste_table.ignore_tx) {
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ use_fixup = true;
+ break;
+ }
+ if (!is_mirror)
+ base_id = mlx5hws_pool_chunk_get_base_id(stc_attr->ste_table.ste_pool,
+ &stc_attr->ste_table.ste);
+ else
+ base_id =
+ mlx5hws_pool_chunk_get_base_mirror_id(stc_attr->ste_table.ste_pool,
+ &stc_attr->ste_table.ste);
+
+ *fixup_stc_attr = *stc_attr;
+ fixup_stc_attr->ste_table.ste_obj_id = base_id;
+ use_fixup = true;
+ break;
+
+ case MLX5_IFC_STC_ACTION_TYPE_TAG:
+ if (fw_tbl_type == FS_FT_FDB_TX) {
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_NOP;
+ fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ use_fixup = true;
+ }
+ break;
+
+ case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
+ if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) {
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT;
+ fixup_stc_attr->action_offset = stc_attr->action_offset;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ fixup_stc_attr->vport.esw_owner_vhca_id = ctx->caps->vhca_id;
+ fixup_stc_attr->vport.vport_num = ctx->caps->eswitch_manager_vport_number;
+ fixup_stc_attr->vport.eswitch_owner_vhca_id_valid =
+ ctx->caps->merged_eswitch;
+ use_fixup = true;
+ }
+ break;
+
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
+ if (stc_attr->vport.vport_num != MLX5_VPORT_UPLINK)
+ break;
+
+ if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) {
+ /* The FW doesn't allow going to the wire in TX/RX via JUMP_TO_VPORT */
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK;
+ fixup_stc_attr->action_offset = stc_attr->action_offset;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ fixup_stc_attr->vport.vport_num = 0;
+ fixup_stc_attr->vport.esw_owner_vhca_id = stc_attr->vport.esw_owner_vhca_id;
+ fixup_stc_attr->vport.eswitch_owner_vhca_id_valid =
+ stc_attr->vport.eswitch_owner_vhca_id_valid;
+ }
+ use_fixup = true;
+ break;
+
+ default:
+ break;
+ }
+
+ return use_fixup;
+}
+
+int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_cmd_stc_modify_attr cleanup_stc_attr = {0};
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
+ struct mlx5hws_cmd_stc_modify_attr fixup_stc_attr = {0};
+ bool use_fixup;
+ u32 obj_0_id;
+ int ret;
+
+ ret = mlx5hws_pool_chunk_alloc(stc_pool, stc);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate single action STC\n");
+ return ret;
+ }
+
+ stc_attr->stc_offset = stc->offset;
+
+ /* Dynamic reparse not supported, overwrite and use default */
+ if (!mlx5hws_context_cap_dynamic_reparse(ctx))
+ stc_attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+ obj_0_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+
+ /* Adjust the stc_attr according to table/action limitations */
+ use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr, &fixup_stc_attr, table_type, false);
+ ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id,
+ use_fixup ? &fixup_stc_attr : stc_attr);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to modify STC action_type %d tbl_type %d\n",
+ stc_attr->action_type, table_type);
+ goto free_chunk;
+ }
+
+ /* Modify the FDB peer */
+ if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
+ u32 obj_1_id;
+
+ obj_1_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+
+ use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr,
+ &fixup_stc_attr,
+ table_type, true);
+ ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_1_id,
+ use_fixup ? &fixup_stc_attr : stc_attr);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed to modify peer STC action_type %d tbl_type %d\n",
+ stc_attr->action_type, table_type);
+ goto clean_obj_0;
+ }
+ }
+
+ return 0;
+
+clean_obj_0:
+ cleanup_stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ cleanup_stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ cleanup_stc_attr.stc_offset = stc->offset;
+ mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id, &cleanup_stc_attr);
+free_chunk:
+ mlx5hws_pool_chunk_free(stc_pool, stc);
+ return ret;
+}
+
+void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ u32 obj_id;
+
+ /* Modify the STC not to point to an object */
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.stc_offset = stc->offset;
+ obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+ mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
+
+ if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+ mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
+ }
+
+ mlx5hws_pool_chunk_free(stc_pool, stc);
+}
+
+static u32 hws_action_get_mh_stc_type(struct mlx5hws_context *ctx,
+ __be64 pattern)
+{
+ u8 action_type = MLX5_GET(set_action_in, &pattern, action_type);
+
+ switch (action_type) {
+ case MLX5_MODIFICATION_TYPE_SET:
+ return MLX5_IFC_STC_ACTION_TYPE_SET;
+ case MLX5_MODIFICATION_TYPE_ADD:
+ return MLX5_IFC_STC_ACTION_TYPE_ADD;
+ case MLX5_MODIFICATION_TYPE_COPY:
+ return MLX5_IFC_STC_ACTION_TYPE_COPY;
+ case MLX5_MODIFICATION_TYPE_ADD_FIELD:
+ return MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD;
+ default:
+ mlx5hws_err(ctx, "Unsupported action type: 0x%x\n", action_type);
+ return MLX5_IFC_STC_ACTION_TYPE_NOP;
+ }
+}
+
+static void hws_action_fill_stc_attr(struct mlx5hws_action *action,
+ u32 obj_id,
+ struct mlx5hws_cmd_stc_modify_attr *attr)
+{
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+ switch (action->type) {
+ case MLX5HWS_ACTION_TYP_TAG:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_TAG;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ break;
+ case MLX5HWS_ACTION_TYP_DROP:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ break;
+ case MLX5HWS_ACTION_TYP_MISS:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ break;
+ case MLX5HWS_ACTION_TYP_CTR:
+ attr->id = obj_id;
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_COUNTER;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW0;
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ if (action->modify_header.require_reparse)
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+
+ if (action->modify_header.num_of_actions == 1) {
+ attr->modify_action.data = action->modify_header.single_action;
+ attr->action_type = hws_action_get_mh_stc_type(action->ctx,
+ attr->modify_action.data);
+
+ if (attr->action_type == MLX5_IFC_STC_ACTION_TYPE_ADD ||
+ attr->action_type == MLX5_IFC_STC_ACTION_TYPE_SET)
+ MLX5_SET(set_action_in, &attr->modify_action.data, data, 0);
+ } else {
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST;
+ attr->modify_header.arg_id = action->modify_header.arg_id;
+ attr->modify_header.pattern_id = action->modify_header.pat_id;
+ }
+ break;
+ case MLX5HWS_ACTION_TYP_TBL:
+ case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ attr->dest_table_id = obj_id;
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ attr->remove_header.decap = 1;
+ attr->remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ attr->remove_header.end_anchor = MLX5_HEADER_ANCHOR_INNER_MAC;
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ if (!action->reformat.require_reparse)
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->insert_header.encap = action->reformat.encap;
+ attr->insert_header.insert_anchor = action->reformat.anchor;
+ attr->insert_header.arg_id = action->reformat.arg_id;
+ attr->insert_header.header_size = action->reformat.header_size;
+ attr->insert_header.insert_offset = action->reformat.offset;
+ break;
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ASO;
+ attr->aso.aso_type = ASO_OPC_MOD_POLICER;
+ attr->aso.devx_obj_id = obj_id;
+ attr->aso.return_reg_id = action->aso.return_reg_id;
+ break;
+ case MLX5HWS_ACTION_TYP_VPORT:
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT;
+ attr->vport.vport_num = action->vport.vport_num;
+ attr->vport.esw_owner_vhca_id = action->vport.esw_owner_vhca_id;
+ attr->vport.eswitch_owner_vhca_id_valid = action->vport.esw_owner_vhca_id_valid;
+ break;
+ case MLX5HWS_ACTION_TYP_POP_VLAN:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ attr->remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;
+ attr->remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN / 2;
+ break;
+ case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ attr->insert_header.encap = 0;
+ attr->insert_header.is_inline = 1;
+ attr->insert_header.insert_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ attr->insert_header.insert_offset = MLX5HWS_ACTION_HDR_LEN_L2_MACS;
+ attr->insert_header.header_size = MLX5HWS_ACTION_HDR_LEN_L2_VLAN;
+ break;
+ case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+ attr->remove_header.decap = 0; /* 0 is the only supported decap mode */
+ attr->remove_words.start_anchor = action->remove_header.anchor;
+ /* The size is already in words */
+ attr->remove_words.num_of_words = action->remove_header.size;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ break;
+ default:
+ mlx5hws_err(action->ctx, "Invalid action type %d\n", action->type);
+ }
+}
+
+static int
+hws_action_create_stcs(struct mlx5hws_action *action, u32 obj_id)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_context *ctx = action->ctx;
+ int ret;
+
+ hws_action_fill_stc_attr(action, obj_id, &stc_attr);
+
+ /* Block unsupported parallel obj modify over the same base */
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Allocate STC for FDB */
+ if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) {
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
+ MLX5HWS_TABLE_TYPE_FDB,
+ &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ if (ret)
+ goto out_err;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+out_err:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static void
+hws_action_destroy_stcs(struct mlx5hws_action *action)
+{
+ struct mlx5hws_context *ctx = action->ctx;
+
+ /* Block unsupported parallel obj modify over the same base */
+ mutex_lock(&ctx->ctrl_lock);
+
+ if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB)
+ mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
+ &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+
+ mutex_unlock(&ctx->ctrl_lock);
+}
+
+static bool hws_action_is_flag_hws_fdb(u32 flags)
+{
+ return flags & MLX5HWS_ACTION_FLAG_HWS_FDB;
+}
+
+static bool
+hws_action_validate_hws_action(struct mlx5hws_context *ctx, u32 flags)
+{
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) {
+ mlx5hws_err(ctx, "Cannot create HWS action since HWS is not supported\n");
+ return false;
+ }
+
+ if ((flags & MLX5HWS_ACTION_FLAG_HWS_FDB) && !ctx->caps->eswitch_manager) {
+ mlx5hws_err(ctx, "Cannot create HWS action for FDB for non-eswitch-manager\n");
+ return false;
+ }
+
+ return true;
+}
+
+static struct mlx5hws_action *
+hws_action_create_generic_bulk(struct mlx5hws_context *ctx,
+ u32 flags,
+ enum mlx5hws_action_type action_type,
+ u8 bulk_sz)
+{
+ struct mlx5hws_action *action;
+ int i;
+
+ if (!hws_action_is_flag_hws_fdb(flags)) {
+ mlx5hws_err(ctx,
+ "Action (type: %d) flags must specify only HWS FDB\n", action_type);
+ return NULL;
+ }
+
+ if (!hws_action_validate_hws_action(ctx, flags))
+ return NULL;
+
+ action = kcalloc(bulk_sz, sizeof(*action), GFP_KERNEL);
+ if (!action)
+ return NULL;
+
+ for (i = 0; i < bulk_sz; i++) {
+ action[i].ctx = ctx;
+ action[i].flags = flags;
+ action[i].type = action_type;
+ }
+
+ return action;
+}
+
+static struct mlx5hws_action *
+hws_action_create_generic(struct mlx5hws_context *ctx,
+ u32 flags,
+ enum mlx5hws_action_type action_type)
+{
+ return hws_action_create_generic_bulk(ctx, flags, action_type, 1);
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table_num(struct mlx5hws_context *ctx,
+ u32 table_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TBL);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, table_id);
+ if (ret)
+ goto free_action;
+
+ action->dest_obj.obj_id = table_id;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_table *tbl,
+ u32 flags)
+{
+ return mlx5hws_action_create_dest_table_num(ctx, tbl->ft_id, flags);
+}
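+
+/*
+ * Illustrative usage sketch (an assumption, not part of this patch): a shared
+ * FDB destination-table action, released with mlx5hws_action_destroy():
+ *
+ *	act = mlx5hws_action_create_dest_table(ctx, tbl,
+ *					       MLX5HWS_ACTION_FLAG_HWS_FDB |
+ *					       MLX5HWS_ACTION_FLAG_SHARED);
+ *	...
+ *	mlx5hws_action_destroy(act);
+ */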
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_drop(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DROP);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_default_miss(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_MISS);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_tag(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TAG);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+static struct mlx5hws_action *
+hws_action_create_aso(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type action_type,
+ u32 obj_id,
+ u8 return_reg_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, action_type);
+ if (!action)
+ return NULL;
+
+ action->aso.obj_id = obj_id;
+ action->aso.return_reg_id = return_reg_id;
+
+ ret = hws_action_create_stcs(action, obj_id);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u8 return_reg_id,
+ u32 flags)
+{
+ return hws_action_create_aso(ctx, MLX5HWS_ACTION_TYP_ASO_METER,
+ obj_id, return_reg_id, flags);
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_counter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_CTR);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, obj_id);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx,
+ u16 vport_num,
+ bool vhca_id_valid,
+ u16 vhca_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (!(flags & MLX5HWS_ACTION_FLAG_HWS_FDB)) {
+ mlx5hws_err(ctx, "Vport action is supported for FDB only\n");
+ return NULL;
+ }
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_VPORT);
+ if (!action)
+ return NULL;
+
+ if (!ctx->caps->merged_eswitch && vhca_id_valid && vhca_id != ctx->caps->vhca_id) {
+ mlx5hws_err(ctx, "Non merged eswitch cannot send to other vhca\n");
+ goto free_action;
+ }
+
+ action->vport.vport_num = vport_num;
+ action->vport.esw_owner_vhca_id_valid = vhca_id_valid;
+
+ if (vhca_id_valid)
+ action->vport.esw_owner_vhca_id = vhca_id;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed creating stc for vport %d\n", vport_num);
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_PUSH_VLAN);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed creating stc for push vlan\n");
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_pop_vlan(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_POP_VLAN);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create remove stc for reformat\n");
+ goto free_action;
+ }
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed creating stc for pop vlan\n");
+ goto free_shared;
+ }
+
+ return action;
+
+free_shared:
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+static int
+hws_action_handle_insert_with_ptr(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_sz)
+{
+ size_t max_sz = 0;
+ u32 arg_id;
+ int ret, i;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ if (hdrs[i].sz % W_SIZE != 0) {
+ mlx5hws_err(action->ctx,
+ "Header data size should be in WORD granularity\n");
+ return -EINVAL;
+ }
+ max_sz = max(hdrs[i].sz, max_sz);
+ }
+
+ /* Allocate single shared arg object for all headers */
+ ret = mlx5hws_arg_create(action->ctx,
+ hdrs->data,
+ max_sz,
+ log_bulk_sz,
+ action->flags & MLX5HWS_ACTION_FLAG_SHARED,
+ &arg_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ action[i].reformat.arg_id = arg_id;
+ action[i].reformat.header_size = hdrs[i].sz;
+ action[i].reformat.num_of_hdrs = num_of_hdrs;
+ action[i].reformat.max_hdr_sz = max_sz;
+ action[i].reformat.require_reparse = true;
+
+ if (action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2 ||
+ action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3) {
+ action[i].reformat.anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ action[i].reformat.offset = 0;
+ action[i].reformat.encap = 1;
+ }
+
+ ret = hws_action_create_stcs(&action[i], 0);
+ if (ret) {
+ mlx5hws_err(action->ctx, "Failed to create stc for reformat\n");
+ goto free_stc;
+ }
+ }
+
+ return 0;
+
+free_stc:
+ while (i--)
+ hws_action_destroy_stcs(&action[i]);
+
+ mlx5hws_arg_destroy(action->ctx, arg_id);
+ return ret;
+}
+
+static int
+hws_action_handle_l2_to_tunnel_l3(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_sz)
+{
+ int ret;
+
+ /* The action is remove-l2-header + insert-l3-header */
+ ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+ if (ret) {
+ mlx5hws_err(action->ctx, "Failed to create remove stc for reformat\n");
+ return ret;
+ }
+
+ /* Reuse the insert with pointer for the L2L3 header */
+ ret = hws_action_handle_insert_with_ptr(action,
+ num_of_hdrs,
+ hdrs,
+ log_bulk_sz);
+ if (ret)
+ goto put_shared_stc;
+
+ return 0;
+
+put_shared_stc:
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+ return ret;
+}
+
+static void hws_action_prepare_decap_l3_actions(size_t data_sz,
+ u8 *mh_data,
+ int *num_of_actions)
+{
+ int actions;
+ u32 i;
+
+ /* Remove L2L3 outer headers */
+ MLX5_SET(stc_ste_param_remove, mh_data, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE);
+ MLX5_SET(stc_ste_param_remove, mh_data, decap, 0x1);
+ MLX5_SET(stc_ste_param_remove, mh_data, remove_start_anchor,
+ MLX5_HEADER_ANCHOR_PACKET_START);
+ MLX5_SET(stc_ste_param_remove, mh_data, remove_end_anchor,
+ MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4);
+ mh_data += MLX5HWS_ACTION_DOUBLE_SIZE; /* Assume every action is 2 dw */
+ actions = 1;
+
+ /* Add the new header using inline actions, 4 bytes at a time. The
+ * header is added in reverse order to the beginning of the packet,
+ * to avoid incorrect parsing by the HW. Since the header is 14B or
+ * 18B, an extra two bytes are padded and later removed.
+ */
+ for (i = 0; i < data_sz / MLX5HWS_ACTION_INLINE_DATA_SIZE + 1; i++) {
+ MLX5_SET(stc_ste_param_insert, mh_data, action_type,
+ MLX5_MODIFICATION_TYPE_INSERT);
+ MLX5_SET(stc_ste_param_insert, mh_data, inline_data, 0x1);
+ MLX5_SET(stc_ste_param_insert, mh_data, insert_anchor,
+ MLX5_HEADER_ANCHOR_PACKET_START);
+ MLX5_SET(stc_ste_param_insert, mh_data, insert_size, 2);
+ mh_data += MLX5HWS_ACTION_DOUBLE_SIZE;
+ actions++;
+ }
+
+ /* Remove first 2 extra bytes */
+ MLX5_SET(stc_ste_param_remove_words, mh_data, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
+ MLX5_SET(stc_ste_param_remove_words, mh_data, remove_start_anchor,
+ MLX5_HEADER_ANCHOR_PACKET_START);
+ /* The hardware expects the size here in words (2 bytes each) */
+ MLX5_SET(stc_ste_param_remove_words, mh_data, remove_size, 1);
+ actions++;
+
+ *num_of_actions = actions;
+}
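+
+/* Worked example (illustrative, assuming a 4B inline data size as implied by
+ * the "insert_size, 2" words setting above): for a 14B inner L2 header the
+ * loop emits 14 / 4 + 1 = 4 inline inserts (16B total, i.e. 2B of padding),
+ * so with the leading remove and the trailing 2B remove-words the list holds
+ * 1 + 4 + 1 = 6 actions.
+ */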
+
+static int
+hws_action_handle_tunnel_l3_to_l2(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_sz)
+{
+ u8 mh_data[MLX5HWS_ACTION_REFORMAT_DATA_SIZE] = {0};
+ struct mlx5hws_context *ctx = action->ctx;
+ u32 arg_id, pat_id;
+ int num_of_actions;
+ int mh_data_size;
+ int ret, i;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ if (hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2 &&
+ hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN) {
+ mlx5hws_err(ctx, "Data size is not supported for decap-l3\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Create a full modify header action list in the shared case */
+ hws_action_prepare_decap_l3_actions(hdrs->sz, mh_data, &num_of_actions);
+ if (action->flags & MLX5HWS_ACTION_FLAG_SHARED)
+ mlx5hws_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions);
+
+ /* All DecapL3 cases require the same max arg size */
+ ret = mlx5hws_arg_create_modify_header_arg(ctx,
+ (__be64 *)mh_data,
+ num_of_actions,
+ log_bulk_sz,
+ action->flags & MLX5HWS_ACTION_FLAG_SHARED,
+ &arg_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ memset(mh_data, 0, MLX5HWS_ACTION_REFORMAT_DATA_SIZE);
+ hws_action_prepare_decap_l3_actions(hdrs[i].sz, mh_data, &num_of_actions);
+ mh_data_size = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
+
+ ret = mlx5hws_pat_get_pattern(ctx, (__be64 *)mh_data, mh_data_size, &pat_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate pattern for DecapL3\n");
+ goto free_stc_and_pat;
+ }
+
+ action[i].modify_header.max_num_of_actions = num_of_actions;
+ action[i].modify_header.num_of_actions = num_of_actions;
+ action[i].modify_header.num_of_patterns = num_of_hdrs;
+ action[i].modify_header.arg_id = arg_id;
+ action[i].modify_header.pat_id = pat_id;
+ action[i].modify_header.require_reparse =
+ mlx5hws_pat_require_reparse((__be64 *)mh_data, num_of_actions);
+
+ ret = hws_action_create_stcs(&action[i], 0);
+ if (ret) {
+ mlx5hws_pat_put_pattern(ctx, pat_id);
+ goto free_stc_and_pat;
+ }
+ }
+
+ return 0;
+
+free_stc_and_pat:
+ while (i--) {
+ hws_action_destroy_stcs(&action[i]);
+ mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id);
+ }
+
+ mlx5hws_arg_destroy(action->ctx, arg_id);
+ return ret;
+}
+
+static int
+hws_action_create_reformat_hws(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 bulk_size)
+{
+ int ret;
+
+ switch (action->type) {
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ ret = hws_action_create_stcs(action, 0);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ ret = hws_action_handle_insert_with_ptr(action, num_of_hdrs, hdrs, bulk_size);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ ret = hws_action_handle_l2_to_tunnel_l3(action, num_of_hdrs, hdrs, bulk_size);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ ret = hws_action_handle_tunnel_l3_to_l2(action, num_of_hdrs, hdrs, bulk_size);
+ break;
+ default:
+ mlx5hws_err(action->ctx, "Invalid HWS reformat action type\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_reformat(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type reformat_type,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (!num_of_hdrs) {
+ mlx5hws_err(ctx, "Reformat num_of_hdrs cannot be zero\n");
+ return NULL;
+ }
+
+ action = hws_action_create_generic_bulk(ctx, flags, reformat_type, num_of_hdrs);
+ if (!action)
+ return NULL;
+
+ if ((flags & MLX5HWS_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_hdrs > 1)) {
+ mlx5hws_err(ctx, "Reformat flags don't fit HWS (flags: 0x%x)\n", flags);
+ goto free_action;
+ }
+
+ ret = hws_action_create_reformat_hws(action, num_of_hdrs, hdrs, log_bulk_size);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create HWS reformat action\n");
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
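+
+/*
+ * Illustrative usage sketch (an assumption, not part of this patch): a single
+ * L2-to-tunnel-L2 encap action; encap_data/encap_data_sz are hypothetical and
+ * must be in WORD granularity:
+ *
+ *	struct mlx5hws_action_reformat_header hdr = {
+ *		.sz = encap_data_sz,
+ *		.data = encap_data,
+ *	};
+ *
+ *	act = mlx5hws_action_create_reformat(ctx,
+ *					     MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ *					     1, &hdr, 0,
+ *					     MLX5HWS_ACTION_FLAG_HWS_FDB |
+ *					     MLX5HWS_ACTION_FLAG_SHARED);
+ */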
+
+static int
+hws_action_create_modify_header_hws(struct mlx5hws_action *action,
+ u8 num_of_patterns,
+ struct mlx5hws_action_mh_pattern *pattern,
+ u32 log_bulk_size)
+{
+ struct mlx5hws_context *ctx = action->ctx;
+ u16 num_actions, max_mh_actions = 0;
+ int i, ret, size_in_bytes;
+ u32 pat_id, arg_id = 0;
+ __be64 *new_pattern;
+ size_t pat_max_sz;
+
+ pat_max_sz = MLX5HWS_ARG_CHUNK_SIZE_MAX * MLX5HWS_ARG_DATA_SIZE;
+ size_in_bytes = pat_max_sz * sizeof(__be64);
+ new_pattern = kcalloc(num_of_patterns, size_in_bytes, GFP_KERNEL);
+ if (!new_pattern)
+ return -ENOMEM;
+
+ /* Calculate maximum number of mh actions for shared arg allocation */
+ for (i = 0; i < num_of_patterns; i++) {
+ size_t new_num_actions;
+ size_t cur_num_actions;
+ u32 nope_location;
+
+ cur_num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
+
+ mlx5hws_pat_calc_nope(pattern[i].data, cur_num_actions,
+ pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE,
+ &new_num_actions, &nope_location,
+ &new_pattern[i * pat_max_sz]);
+
+ action[i].modify_header.nope_locations = nope_location;
+ action[i].modify_header.num_of_actions = new_num_actions;
+
+ max_mh_actions = max(max_mh_actions, new_num_actions);
+ }
+
+ if (mlx5hws_arg_get_arg_log_size(max_mh_actions) >= MLX5HWS_ARG_CHUNK_SIZE_MAX) {
+ mlx5hws_err(ctx, "Num of actions (%d) bigger than allowed\n",
+ max_mh_actions);
+ ret = -EINVAL;
+ goto free_new_pat;
+ }
+
+ /* Allocate single shared arg for all patterns based on the max size */
+ if (max_mh_actions > 1) {
+ ret = mlx5hws_arg_create_modify_header_arg(ctx,
+ pattern->data,
+ max_mh_actions,
+ log_bulk_size,
+ action->flags &
+ MLX5HWS_ACTION_FLAG_SHARED,
+ &arg_id);
+ if (ret)
+ goto free_new_pat;
+ }
+
+ for (i = 0; i < num_of_patterns; i++) {
+ if (!mlx5hws_pat_verify_actions(ctx, pattern[i].data, pattern[i].sz)) {
+ mlx5hws_err(ctx, "Fail to verify pattern modify actions\n");
+ ret = -EINVAL;
+ goto free_stc_and_pat;
+ }
+ num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
+ action[i].modify_header.num_of_patterns = num_of_patterns;
+ action[i].modify_header.max_num_of_actions = max_mh_actions;
+
+ action[i].modify_header.require_reparse =
+ mlx5hws_pat_require_reparse(pattern[i].data, num_actions);
+
+ if (num_actions == 1) {
+ pat_id = 0;
+ /* Optimize single modify action to be used inline */
+ action[i].modify_header.single_action = pattern[i].data[0];
+ action[i].modify_header.single_action_type =
+ MLX5_GET(set_action_in, pattern[i].data, action_type);
+ } else {
+ /* Multiple modify actions require a pattern */
+ if (unlikely(action[i].modify_header.nope_locations)) {
+ size_t pattern_sz;
+
+ pattern_sz = action[i].modify_header.num_of_actions *
+ MLX5HWS_MODIFY_ACTION_SIZE;
+ ret =
+ mlx5hws_pat_get_pattern(ctx,
+ &new_pattern[i * pat_max_sz],
+ pattern_sz, &pat_id);
+ } else {
+ ret = mlx5hws_pat_get_pattern(ctx,
+ pattern[i].data,
+ pattern[i].sz,
+ &pat_id);
+ }
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed to allocate pattern for modify header\n");
+ goto free_stc_and_pat;
+ }
+
+ action[i].modify_header.arg_id = arg_id;
+ action[i].modify_header.pat_id = pat_id;
+ }
+ /* Allocate STC for each action representing a header */
+ ret = hws_action_create_stcs(&action[i], 0);
+ if (ret) {
+ if (pat_id)
+ mlx5hws_pat_put_pattern(ctx, pat_id);
+ goto free_stc_and_pat;
+ }
+ }
+
+ kfree(new_pattern);
+ return 0;
+
+free_stc_and_pat:
+ while (i--) {
+ hws_action_destroy_stcs(&action[i]);
+ if (action[i].modify_header.pat_id)
+ mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id);
+ }
+
+ if (arg_id)
+ mlx5hws_arg_destroy(ctx, arg_id);
+free_new_pat:
+ kfree(new_pattern);
+ return ret;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx,
+ u8 num_of_patterns,
+ struct mlx5hws_action_mh_pattern *patterns,
+ u32 log_bulk_size,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (!num_of_patterns) {
+ mlx5hws_err(ctx, "Invalid number of patterns\n");
+ return NULL;
+ }
+ action = hws_action_create_generic_bulk(ctx, flags,
+ MLX5HWS_ACTION_TYP_MODIFY_HDR,
+ num_of_patterns);
+ if (!action)
+ return NULL;
+
+ if ((flags & MLX5HWS_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_patterns > 1)) {
+ mlx5hws_err(ctx, "Action cannot be shared with requested pattern or size\n");
+ goto free_action;
+ }
+
+ ret = hws_action_create_modify_header_hws(action,
+ num_of_patterns,
+ patterns,
+ log_bulk_size);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
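+
+/*
+ * Illustrative usage sketch (an assumption, not part of this patch): a
+ * modify-header action with a single SET pattern; prm_field_id and value are
+ * hypothetical:
+ *
+ *	__be64 modify_action[1] = {};
+ *	struct mlx5hws_action_mh_pattern pattern = {
+ *		.sz = sizeof(modify_action),
+ *		.data = modify_action,
+ *	};
+ *
+ *	MLX5_SET(set_action_in, modify_action, action_type,
+ *		 MLX5_MODIFICATION_TYPE_SET);
+ *	MLX5_SET(set_action_in, modify_action, field, prm_field_id);
+ *	MLX5_SET(set_action_in, modify_action, data, value);
+ *	act = mlx5hws_action_create_modify_header(ctx, 1, &pattern, 0, flags);
+ */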
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
+ size_t num_dest,
+ struct mlx5hws_action_dest_attr *dests,
+ bool ignore_flow_level,
+ u32 flow_source,
+ u32 flags)
+{
+ struct mlx5hws_cmd_set_fte_dest *dest_list = NULL;
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ struct mlx5hws_action *action;
+ u32 i;
+ int ret;
+
+ if (num_dest <= 1) {
+ mlx5hws_err(ctx, "Action must have multiple dests\n");
+ return NULL;
+ }
+
+ if (flags == (MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED)) {
+ ft_attr.type = FS_FT_FDB;
+ ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+ } else {
+ mlx5hws_err(ctx, "Action flags not supported\n");
+ return NULL;
+ }
+
+ dest_list = kcalloc(num_dest, sizeof(*dest_list), GFP_KERNEL);
+ if (!dest_list)
+ return NULL;
+
+ for (i = 0; i < num_dest; i++) {
+ enum mlx5hws_action_type action_type = dests[i].dest->type;
+ struct mlx5hws_action *reformat_action = dests[i].reformat;
+
+ switch (action_type) {
+ case MLX5HWS_ACTION_TYP_TBL:
+ dest_list[i].destination_type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest_list[i].destination_id = dests[i].dest->dest_obj.obj_id;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.ignore_flow_level = ignore_flow_level;
+ /* ToDo: In SW steering we have a handling of 'go to WIRE'
+ * destination here by upper layer setting 'is_wire_ft' flag
+ * if the destination is wire.
+ * This is because uplink should be last dest in the list.
+ */
+ break;
+ case MLX5HWS_ACTION_TYP_VPORT:
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest_list[i].destination_id = dests[i].dest->vport.vport_num;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (ctx->caps->merged_eswitch) {
+ dest_list[i].ext_flags |=
+ MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID;
+ dest_list[i].esw_owner_vhca_id =
+ dests[i].dest->vport.esw_owner_vhca_id;
+ }
+ break;
+ default:
+ mlx5hws_err(ctx, "Unsupported action in dest_array\n");
+ goto free_dest_list;
+ }
+
+ if (reformat_action) {
+ mlx5hws_err(ctx, "dest_array with reformat action - unsupported\n");
+ goto free_dest_list;
+ }
+ }
+
+ fte_attr.dests_num = num_dest;
+ fte_attr.dests = dest_list;
+
+ fw_island = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+ if (!fw_island)
+ goto free_dest_list;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DEST_ARRAY);
+ if (!action)
+ goto destroy_fw_island;
+
+ ret = hws_action_create_stcs(action, fw_island->ft_id);
+ if (ret)
+ goto free_action;
+
+ action->dest_array.fw_island = fw_island;
+ action->dest_array.num_dest = num_dest;
+ action->dest_array.dest_list = dest_list;
+
+ return action;
+
+free_action:
+ kfree(action);
+destroy_fw_island:
+ mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, fw_island);
+free_dest_list:
+ for (i = 0; i < num_dest; i++) {
+ if (dest_list[i].ext_reformat_id)
+ mlx5hws_cmd_packet_reformat_destroy(ctx->mdev,
+ dest_list[i].ext_reformat_id);
+ }
+ kfree(dest_list);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_insert_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags)
+{
+ struct mlx5hws_action_reformat_header *reformat_hdrs;
+ struct mlx5hws_action *action;
+ int ret;
+ int i;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_INSERT_HEADER);
+ if (!action)
+ return NULL;
+
+ reformat_hdrs = kcalloc(num_of_hdrs, sizeof(*reformat_hdrs), GFP_KERNEL);
+ if (!reformat_hdrs)
+ goto free_action;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ if (hdrs[i].offset % W_SIZE != 0) {
+ mlx5hws_err(ctx, "Header offset should be in WORD granularity\n");
+ goto free_reformat_hdrs;
+ }
+
+ action[i].reformat.anchor = hdrs[i].anchor;
+ action[i].reformat.encap = hdrs[i].encap;
+ action[i].reformat.offset = hdrs[i].offset;
+
+ reformat_hdrs[i].sz = hdrs[i].hdr.sz;
+ reformat_hdrs[i].data = hdrs[i].hdr.data;
+ }
+
+ ret = hws_action_handle_insert_with_ptr(action, num_of_hdrs,
+ reformat_hdrs, log_bulk_size);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create HWS reformat action\n");
+ goto free_reformat_hdrs;
+ }
+
+ kfree(reformat_hdrs);
+
+ return action;
+
+free_reformat_hdrs:
+ kfree(reformat_hdrs);
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_remove_header(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_remove_header_attr *attr,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_REMOVE_HEADER);
+ if (!action)
+ return NULL;
+
+ /* Only removal by start anchor plus size is supported */
+ if (attr->size % W_SIZE != 0) {
+ mlx5hws_err(ctx,
+ "Invalid size, HW supports header remove in WORD granularity\n");
+ goto free_action;
+ }
+
+ if (attr->size > MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE) {
+ mlx5hws_err(ctx, "Header removal size limited to %u bytes\n",
+ MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE);
+ goto free_action;
+ }
+
+ action->remove_header.anchor = attr->anchor;
+ action->remove_header.size = attr->size / W_SIZE;
+
+ if (hws_action_create_stcs(action, 0))
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
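+
+/*
+ * Illustrative usage sketch (an assumption, not part of this patch): remove
+ * 4 bytes starting at the first VLAN header; the anchor choice is
+ * hypothetical:
+ *
+ *	struct mlx5hws_action_remove_header_attr attr = {
+ *		.anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START,
+ *		.size = 4,
+ *	};
+ *
+ *	act = mlx5hws_action_create_remove_header(ctx, &attr,
+ *						  MLX5HWS_ACTION_FLAG_HWS_FDB);
+ */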
+
+static struct mlx5hws_definer *
+hws_action_create_dest_match_range_definer(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_definer *definer;
+ __be32 *tag;
+ int ret;
+
+ definer = kzalloc(sizeof(*definer), GFP_KERNEL);
+ if (!definer)
+ return NULL;
+
+ definer->dw_selector[0] = MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN / 4;
+ /* Set DW0 tag mask */
+ tag = (__force __be32 *)definer->mask.jumbo;
+ tag[MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0] = htonl(0xffffUL << 16);
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ ret = mlx5hws_definer_get_obj(ctx, definer);
+ if (ret < 0) {
+ mutex_unlock(&ctx->ctrl_lock);
+ kfree(definer);
+ return NULL;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+ definer->obj_id = ret;
+
+ return definer;
+}
+
+static struct mlx5hws_matcher_action_ste *
+hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer,
+ u32 miss_ft_id)
+{
+ struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
+ struct mlx5hws_action_default_stc *default_stc;
+ struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_pool_attr pool_attr = {0};
+ struct mlx5hws_pool *ste_pool, *stc_pool;
+ struct mlx5hws_pool_chunk *ste;
+ u32 *rtc_0_id, *rtc_1_id;
+ u32 obj_id;
+ int ret;
+
+ /* Check if STE range is supported */
+ if (!IS_BIT_SET(ctx->caps->supp_ste_format_gen_wqe, MLX5_IFC_RTC_STE_FORMAT_RANGE)) {
+ mlx5hws_err(ctx, "Range STE format not supported\n");
+ return NULL;
+ }
+
+ table_ste = kzalloc(sizeof(*table_ste), GFP_KERNEL);
+ if (!table_ste)
+ return NULL;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
+ pool_attr.alloc_log_sz = 1;
+ table_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!table_ste->pool) {
+ mlx5hws_err(ctx, "Failed to allocate memory ste pool\n");
+ goto free_ste;
+ }
+
+ /* Allocate RTC */
+ rtc_0_id = &table_ste->rtc_0_id;
+ rtc_1_id = &table_ste->rtc_1_id;
+ ste_pool = table_ste->pool;
+ ste = &table_ste->ste;
+ ste->order = 1;
+
+ rtc_attr.log_size = 0;
+ rtc_attr.log_depth = 0;
+ rtc_attr.miss_ft_id = miss_ft_id;
+ rtc_attr.num_hash_definer = 1;
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
+ rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
+ rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
+ rtc_attr.fw_gen_wqe = true;
+ rtc_attr.is_scnd_range = true;
+
+ obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+
+ rtc_attr.pd = ctx->pd_num;
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.ste_offset = ste->offset;
+ rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false);
+
+ /* STC is a single resource (obj_id), use any STC for the ID */
+ stc_pool = ctx->stc_pool[MLX5HWS_TABLE_TYPE_FDB];
+ default_stc = ctx->common_res[MLX5HWS_TABLE_TYPE_FDB].default_stc;
+ obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+ rtc_attr.stc_base = obj_id;
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create RTC");
+ goto pool_destroy;
+ }
+
+ /* Create mirror RTC */
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, true);
+
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+ rtc_attr.stc_base = obj_id;
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create mirror RTC");
+ goto destroy_rtc_0;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return table_ste;
+
+destroy_rtc_0:
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id);
+pool_destroy:
+ mlx5hws_pool_destroy(table_ste->pool);
+free_ste:
+ mutex_unlock(&ctx->ctrl_lock);
+ kfree(table_ste);
+ return NULL;
+}
+
+static void
+hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_matcher_action_ste *table_ste)
+{
+ mutex_lock(&ctx->ctrl_lock);
+
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_1_id);
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_0_id);
+ mlx5hws_pool_destroy(table_ste->pool);
+ kfree(table_ste);
+
+ mutex_unlock(&ctx->ctrl_lock);
+}
+
+static int
+hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_matcher_action_ste *table_ste,
+ struct mlx5hws_action *hit_ft_action,
+ struct mlx5hws_definer *range_definer,
+ u32 min, u32 max)
+{
+ struct mlx5hws_wqe_gta_data_seg_ste match_wqe_data = {0};
+ struct mlx5hws_wqe_gta_data_seg_ste range_wqe_data = {0};
+ struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
+ u32 no_use, used_rtc_0_id, used_rtc_1_id;
+ int ret;
+ struct mlx5hws_context_common_res *common_res;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_engine *queue;
+ __be32 *wqe_data_arr;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Get the control queue */
+ queue = &ctx->send_queue[ctx->queues - 1];
+ if (unlikely(mlx5hws_send_engine_err(queue))) {
+ ret = -EIO;
+ goto error;
+ }
+
+ /* Init default send STE attributes */
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.send_attr.user_data = &no_use;
+ ste_attr.send_attr.rule = NULL;
+ ste_attr.send_attr.fence = 1;
+ ste_attr.send_attr.notify_hw = true;
+ ste_attr.rtc_0 = table_ste->rtc_0_id;
+ ste_attr.rtc_1 = table_ste->rtc_1_id;
+ ste_attr.used_id_rtc_0 = &used_rtc_0_id;
+ ste_attr.used_id_rtc_1 = &used_rtc_1_id;
+
+ common_res = &ctx->common_res[MLX5HWS_TABLE_TYPE_FDB];
+
+ /* init an empty match STE which will always hit */
+ ste_attr.wqe_ctrl = &wqe_ctrl;
+ ste_attr.wqe_data = &match_wqe_data;
+ ste_attr.send_attr.match_definer_id = ctx->caps->trivial_match_definer;
+
+ /* Fill WQE control data */
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] =
+ htonl(common_res->default_stc->nop_ctr.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(common_res->default_stc->nop_dw5.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] =
+ htonl(common_res->default_stc->nop_dw6.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] =
+ htonl(common_res->default_stc->nop_dw7.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
+ htonl(MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 << 29);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
+ htonl(hit_ft_action->stc[MLX5HWS_TABLE_TYPE_FDB].offset);
+
+ wqe_data_arr = (__force __be32 *)&range_wqe_data;
+
+ ste_attr.range_wqe_data = &range_wqe_data;
+ ste_attr.send_attr.len += MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.send_attr.range_definer_id = mlx5hws_definer_get_id(range_definer);
+
+ /* Fill range matching fields,
+ * min/max_value_2 corresponds to match_dw_0 in its definer,
+ * min_value_2 sets in DW0 in the STE and max_value_2 sets in DW1 in the STE.
+ */
+ wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW0] = htonl(min << 16);
+ wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW1] = htonl(max << 16);
+
+ /* Send WQEs to FW */
+ mlx5hws_send_stes_fw(ctx, queue, &ste_attr);
+
+ /* Poll for completion */
+ ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1,
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to drain control queue");
+ goto error;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+error:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
+ u32 field,
+ struct mlx5_flow_table *hit_ft,
+ struct mlx5_flow_table *miss_ft,
+ u32 min, u32 max, u32 flags)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_action *hit_ft_action;
+ struct mlx5hws_definer *definer;
+ struct mlx5hws_action *action;
+ u32 miss_ft_id = miss_ft->id;
+ u32 hit_ft_id = hit_ft->id;
+ int ret;
+
+ if (field != MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN ||
+ min > 0xffff || max > 0xffff) {
+ mlx5hws_err(ctx, "Invalid match range parameters\n");
+ return NULL;
+ }
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_RANGE);
+ if (!action)
+ return NULL;
+
+ definer = hws_action_create_dest_match_range_definer(ctx);
+ if (!definer)
+ goto free_action;
+
+ table_ste = hws_action_create_dest_match_range_table(ctx, definer, miss_ft_id);
+ if (!table_ste)
+ goto destroy_definer;
+
+ hit_ft_action = mlx5hws_action_create_dest_table_num(ctx, hit_ft_id, flags);
+ if (!hit_ft_action)
+ goto destroy_table_ste;
+
+ ret = hws_action_create_dest_match_range_fill_table(ctx, table_ste,
+ hit_ft_action,
+ definer, min, max);
+ if (ret)
+ goto destroy_hit_ft_action;
+
+ action->range.table_ste = table_ste;
+ action->range.definer = definer;
+ action->range.hit_ft_action = hit_ft_action;
+
+ /* Allocate STC for jumps to STE */
+ mutex_lock(&ctx->ctrl_lock);
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.ste_table.ste = table_ste->ste;
+ stc_attr.ste_table.ste_pool = table_ste->pool;
+ stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, MLX5HWS_TABLE_TYPE_FDB,
+ &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ if (ret)
+ goto error_unlock;
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return action;
+
+error_unlock:
+ mutex_unlock(&ctx->ctrl_lock);
+destroy_hit_ft_action:
+ mlx5hws_action_destroy(hit_ft_action);
+destroy_table_ste:
+ hws_action_destroy_dest_match_range_table(ctx, table_ste);
+destroy_definer:
+ mlx5hws_definer_free(ctx, definer);
+free_action:
+ kfree(action);
+ mlx5hws_err(ctx, "Failed to create action dest match range");
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_last(struct mlx5hws_context *ctx, u32 flags)
+{
+ return hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_LAST);
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
+ u32 sampler_id, u32 flags)
+{
+ mlx5hws_err(ctx, "Flow sampler action - unsupported\n");
+ return NULL;
+}
+
+static void hws_action_destroy_hws(struct mlx5hws_action *action)
+{
+ u32 ext_reformat_id;
+ bool shared_arg;
+ u32 obj_id;
+ u32 i;
+
+ switch (action->type) {
+ case MLX5HWS_ACTION_TYP_MISS:
+ case MLX5HWS_ACTION_TYP_TAG:
+ case MLX5HWS_ACTION_TYP_DROP:
+ case MLX5HWS_ACTION_TYP_CTR:
+ case MLX5HWS_ACTION_TYP_TBL:
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+ case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+ case MLX5HWS_ACTION_TYP_VPORT:
+ hws_action_destroy_stcs(action);
+ break;
+ case MLX5HWS_ACTION_TYP_POP_VLAN:
+ hws_action_destroy_stcs(action);
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+ break;
+ case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ hws_action_destroy_stcs(action);
+ mlx5hws_cmd_forward_tbl_destroy(action->ctx->mdev, action->dest_array.fw_island);
+ for (i = 0; i < action->dest_array.num_dest; i++) {
+ ext_reformat_id = action->dest_array.dest_list[i].ext_reformat_id;
+ if (ext_reformat_id)
+ mlx5hws_cmd_packet_reformat_destroy(action->ctx->mdev,
+ ext_reformat_id);
+ }
+ kfree(action->dest_array.dest_list);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+ shared_arg = false;
+ for (i = 0; i < action->modify_header.num_of_patterns; i++) {
+ hws_action_destroy_stcs(&action[i]);
+ if (action[i].modify_header.num_of_actions > 1) {
+ mlx5hws_pat_put_pattern(action[i].ctx,
+ action[i].modify_header.pat_id);
+ /* Save shared arg object to be freed after */
+ obj_id = action[i].modify_header.arg_id;
+ shared_arg = true;
+ }
+ }
+ if (shared_arg)
+ mlx5hws_arg_destroy(action->ctx, obj_id);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+ for (i = 0; i < action->reformat.num_of_hdrs; i++)
+ hws_action_destroy_stcs(&action[i]);
+ mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id);
+ break;
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ for (i = 0; i < action->reformat.num_of_hdrs; i++)
+ hws_action_destroy_stcs(&action[i]);
+ mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id);
+ break;
+ case MLX5HWS_ACTION_TYP_RANGE:
+ hws_action_destroy_stcs(action);
+ hws_action_destroy_dest_match_range_table(action->ctx, action->range.table_ste);
+ mlx5hws_definer_free(action->ctx, action->range.definer);
+ mlx5hws_action_destroy(action->range.hit_ft_action);
+ break;
+ case MLX5HWS_ACTION_TYP_LAST:
+ break;
+ default:
+ pr_warn("HWS: Invalid action type: %d\n", action->type);
+ }
+}
+
+int mlx5hws_action_destroy(struct mlx5hws_action *action)
+{
+ hws_action_destroy_hws(action);
+
+ kfree(action);
+ return 0;
+}
+
+int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx, u8 tbl_type)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_action_default_stc *default_stc;
+ int ret;
+
+ if (ctx->common_res[tbl_type].default_stc) {
+ ctx->common_res[tbl_type].default_stc->refcount++;
+ return 0;
+ }
+
+ default_stc = kzalloc(sizeof(*default_stc), GFP_KERNEL);
+ if (!default_stc)
+ return -ENOMEM;
+
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_NOP;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW0;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_ctr);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default counter STC\n");
+ goto free_default_stc;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_dw5);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default NOP DW5 STC\n");
+ goto free_nop_ctr;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_dw6);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default NOP DW6 STC\n");
+ goto free_nop_dw5;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW7;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_dw7);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default NOP DW7 STC\n");
+ goto free_nop_dw6;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->default_hit);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default allow STC\n");
+ goto free_nop_dw7;
+ }
+
+ ctx->common_res[tbl_type].default_stc = default_stc;
+ ctx->common_res[tbl_type].default_stc->refcount++;
+
+ return 0;
+
+free_nop_dw7:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);
+free_nop_dw6:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
+free_nop_dw5:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
+free_nop_ctr:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
+free_default_stc:
+ kfree(default_stc);
+ return ret;
+}
+
+void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx, u8 tbl_type)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_action_default_stc *default_stc;
+
+ default_stc = ctx->common_res[tbl_type].default_stc;
+ if (--default_stc->refcount)
+ return;
+
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->default_hit);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
+ kfree(default_stc);
+ ctx->common_res[tbl_type].default_stc = NULL;
+}
+
+static void hws_action_modify_write(struct mlx5hws_send_engine *queue,
+ u32 arg_idx,
+ u8 *arg_data,
+ u16 num_of_actions,
+ u32 nope_locations)
+{
+ u8 *new_arg_data = NULL;
+ int i, j;
+
+ if (unlikely(nope_locations)) {
+ new_arg_data = kcalloc(num_of_actions,
+ MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
+ if (unlikely(!new_arg_data))
+ return;
+
+ for (i = 0, j = 0; i < num_of_actions; i++, j++) {
+			memcpy(&new_arg_data[j * MLX5HWS_MODIFY_ACTION_SIZE],
+			       &arg_data[i * MLX5HWS_MODIFY_ACTION_SIZE],
+			       MLX5HWS_MODIFY_ACTION_SIZE);
+ if (BIT(i) & nope_locations)
+ j++;
+ }
+ }
+
+ mlx5hws_arg_write(queue, NULL, arg_idx,
+ new_arg_data ? new_arg_data : arg_data,
+ num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE);
+
+ kfree(new_arg_data);
+}
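+
+/* Example of the expansion above (illustrative): for actions A0,A1,A2
+ * with nope_locations = BIT(1), the argument is written out as
+ * A0,A1,NOP,A2 - one MLX5HWS_MODIFY_ACTION_SIZE slot each, where the
+ * NOP slot is simply left zeroed by kcalloc().
+ */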
+
+void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst, u16 num_of_actions)
+{
+ u8 *e_src;
+ int i;
+
+	/* num_of_actions = remove l3l2 + 4/5 inserts + remove extra 2 bytes.
+	 * Copy from the end of src to the start of dst;
+	 * the trailing 2 bytes are the leftover of the 14B or 18B header.
+	 */
+ if (num_of_actions == DECAP_L3_NUM_ACTIONS_W_NO_VLAN)
+ e_src = src + MLX5HWS_ACTION_HDR_LEN_L2;
+ else
+ e_src = src + MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN;
+
+ /* Move dst over the first remove action + zero data */
+ dst += MLX5HWS_ACTION_DOUBLE_SIZE;
+ /* Move dst over the first insert ctrl action */
+ dst += MLX5HWS_ACTION_DOUBLE_SIZE / 2;
+ /* Actions:
+ * no vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
+ * with vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
+ * the loop is without the last insertion.
+ */
+ for (i = 0; i < num_of_actions - 3; i++) {
+ e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE;
+ memcpy(dst, e_src, MLX5HWS_ACTION_INLINE_DATA_SIZE); /* data */
+ dst += MLX5HWS_ACTION_DOUBLE_SIZE;
+ }
+ /* Copy the last 2 bytes after a gap of 2 bytes which will be removed */
+ e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE / 2;
+ dst += MLX5HWS_ACTION_INLINE_DATA_SIZE / 2;
+ memcpy(dst, e_src, 2);
+}
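+
+/* Worked example (no VLAN, num_of_actions == 6, 14B L2 header): the
+ * loop above copies src[10..13], src[6..9] and src[2..5] into the data
+ * slots of consecutive insert actions; the tail copies src[0..1] into
+ * the second half of the last insert's slot, after the 2B gap that the
+ * final remove_2b action strips.
+ */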
+
+static int
+hws_action_get_shared_stc_offset(struct mlx5hws_context_common_res *common_res,
+ enum mlx5hws_context_shared_stc_type stc_type)
+{
+ return common_res->shared_stc[stc_type]->stc_chunk.offset;
+}
+
+static struct mlx5hws_actions_wqe_setter *
+hws_action_setter_find_first(struct mlx5hws_actions_wqe_setter *setter,
+ u8 req_flags)
+{
+ /* Use a new setter if requested flags are taken */
+ while (setter->flags & req_flags)
+ setter++;
+
+	/* Use the current setter if the required flags are not used */
+ return setter;
+}
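+
+/* E.g. if the current setter already carries ASF_DOUBLE and another
+ * double action is requested, the search advances to the next setter
+ * (i.e. the next action STE) until one with free slots is found.
+ */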
+
+static void
+hws_action_apply_stc(struct mlx5hws_actions_apply_data *apply,
+ enum mlx5hws_action_stc_idx stc_idx,
+ u8 action_idx)
+{
+ struct mlx5hws_action *action = apply->rule_action[action_idx].action;
+
+ apply->wqe_ctrl->stc_ix[stc_idx] =
+ htonl(action->stc[apply->tbl_type].offset);
+}
+
+static void
+hws_action_setter_push_vlan(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = rule_action->push_vlan.vlan_hdr;
+
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+}
+
+static void
+hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ struct mlx5hws_action *action;
+ u32 arg_sz, arg_idx;
+ u8 *single_action;
+ __be32 stc_idx;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ action = rule_action->action;
+
+ stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+
+ if (action->modify_header.num_of_actions == 1) {
+ if (action->modify_header.single_action_type ==
+ MLX5_MODIFICATION_TYPE_COPY ||
+ action->modify_header.single_action_type ==
+ MLX5_MODIFICATION_TYPE_ADD_FIELD) {
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+ return;
+ }
+
+ if (action->flags & MLX5HWS_ACTION_FLAG_SHARED)
+ single_action = (u8 *)&action->modify_header.single_action;
+ else
+ single_action = rule_action->modify_header.data;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] =
+ *(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data);
+ } else {
+		/* Argument index is the rule offset times the arg size of these actions */
+ arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
+ arg_idx = rule_action->modify_header.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ hws_action_modify_write(apply->queue,
+ action->modify_header.arg_id + arg_idx,
+ rule_action->modify_header.data,
+ action->modify_header.num_of_actions,
+ action->modify_header.nope_locations);
+ }
+ }
+}
+
+static void
+hws_action_setter_insert_ptr(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ struct mlx5hws_action *action;
+ u32 arg_idx, arg_sz;
+ __be32 stc_idx;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ action = rule_action->action + rule_action->reformat.hdr_idx;
+
+	/* Argument index is the rule offset times the arg size for the header */
+ arg_sz = mlx5hws_arg_data_size_to_arg_size(action->reformat.max_hdr_sz);
+ arg_idx = rule_action->reformat.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ mlx5hws_arg_write(apply->queue, NULL,
+ action->reformat.arg_id + arg_idx,
+ rule_action->reformat.data,
+ action->reformat.header_size);
+ }
+}
+
+static void
+hws_action_setter_tnl_l3_to_l2(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ struct mlx5hws_action *action;
+ u32 arg_sz, arg_idx;
+ __be32 stc_idx;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ action = rule_action->action + rule_action->reformat.hdr_idx;
+
+	/* Argument index is the rule offset times the arg size for the number of actions */
+ arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
+ arg_idx = rule_action->reformat.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ mlx5hws_arg_decapl3_write(apply->queue,
+ action->modify_header.arg_id + arg_idx,
+ rule_action->reformat.data,
+ action->modify_header.num_of_actions);
+ }
+}
+
+static void
+hws_action_setter_aso(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ u32 exe_aso_ctrl;
+ u32 offset;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+
+ switch (rule_action->action->type) {
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ /* exe_aso_ctrl format:
+ * [STC only and reserved bits 29b][init_color 2b][meter_id 1b]
+ */
+ offset = rule_action->aso_meter.offset / MLX5_ASO_METER_NUM_PER_OBJ;
+ exe_aso_ctrl = rule_action->aso_meter.offset % MLX5_ASO_METER_NUM_PER_OBJ;
+ exe_aso_ctrl |= rule_action->aso_meter.init_color <<
+ MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET;
+ break;
+ default:
+ mlx5hws_err(rule_action->action->ctx,
+ "Unsupported ASO action type: %d\n", rule_action->action->type);
+ return;
+ }
+
+	/* aso_object_offset format: [24 bits] */
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = htonl(offset);
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(exe_aso_ctrl);
+
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+}
+
+static void
+hws_action_setter_tag(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+
+ rule_action = &apply->rule_action[setter->idx_single];
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = htonl(rule_action->tag.value);
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single);
+}
+
+static void
+hws_action_setter_ctrl_ctr(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+
+ rule_action = &apply->rule_action[setter->idx_ctr];
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = htonl(rule_action->counter.offset);
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_CTRL, setter->idx_ctr);
+}
+
+static void
+hws_action_setter_single(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single);
+}
+
+static void
+hws_action_setter_single_double_pop(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(hws_action_get_shared_stc_offset(apply->common_res,
+ MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP));
+}
+
+static void
+hws_action_setter_hit(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit);
+}
+
+static void
+hws_action_setter_default_hit(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
+ htonl(apply->common_res->default_stc->default_hit.offset);
+}
+
+static void
+hws_action_setter_hit_next_action(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = htonl(apply->next_direct_idx << 6);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] = htonl(apply->jump_to_action_stc);
+}
+
+static void
+hws_action_setter_common_decap(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(hws_action_get_shared_stc_offset(apply->common_res,
+ MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3));
+}
+
+static void
+hws_action_setter_range(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ /* Always jump to index zero */
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit);
+}
+
+int mlx5hws_action_template_process(struct mlx5hws_action_template *at)
+{
+ struct mlx5hws_actions_wqe_setter *start_setter = at->setters + 1;
+ enum mlx5hws_action_type *action_type = at->action_type_arr;
+ struct mlx5hws_actions_wqe_setter *setter = at->setters;
+ struct mlx5hws_actions_wqe_setter *pop_setter = NULL;
+ struct mlx5hws_actions_wqe_setter *last_setter;
+ int i;
+
+ /* Note: Given action combination must be valid */
+
+	/* Check if actions were already processed */
+ if (at->num_of_action_stes)
+ return 0;
+
+ for (i = 0; i < MLX5HWS_ACTION_MAX_STE; i++)
+ setter[i].set_hit = &hws_action_setter_hit_next_action;
+
+	/* The same action template setters can be used with jumbo or match
+	 * STE. To support both cases we reserve the first setter for the
+	 * jumbo STE case, allowing a jump to the first action STE.
+	 * This extra setter can be elided in some cases on rule creation.
+	 */
+ setter = start_setter;
+ last_setter = start_setter;
+
+ for (i = 0; i < at->num_actions; i++) {
+ switch (action_type[i]) {
+ case MLX5HWS_ACTION_TYP_DROP:
+ case MLX5HWS_ACTION_TYP_TBL:
+ case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ case MLX5HWS_ACTION_TYP_VPORT:
+ case MLX5HWS_ACTION_TYP_MISS:
+ /* Hit action */
+ last_setter->flags |= ASF_HIT;
+ last_setter->set_hit = &hws_action_setter_hit;
+ last_setter->idx_hit = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_RANGE:
+ last_setter->flags |= ASF_HIT;
+ last_setter->set_hit = &hws_action_setter_range;
+ last_setter->idx_hit = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_POP_VLAN:
+ /* Single remove header to header */
+ if (pop_setter) {
+ /* We have 2 pops, use the shared */
+ pop_setter->set_single = &hws_action_setter_single_double_pop;
+ break;
+ }
+ setter = hws_action_setter_find_first(last_setter,
+ ASF_SINGLE1 | ASF_MODIFY |
+ ASF_INSERT);
+ setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
+ setter->set_single = &hws_action_setter_single;
+ setter->idx_single = i;
+ pop_setter = setter;
+ break;
+
+ case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+ /* Double insert inline */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_INSERT;
+ setter->set_double = &hws_action_setter_push_vlan;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+ /* Double modify header list */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_MODIFY;
+ setter->set_double = &hws_action_setter_modify_header;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ /* Double ASO action */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE);
+ setter->flags |= ASF_DOUBLE;
+ setter->set_double = &hws_action_setter_aso;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ /* Single remove header to header */
+ setter = hws_action_setter_find_first(last_setter,
+ ASF_SINGLE1 | ASF_MODIFY);
+ setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
+ setter->set_single = &hws_action_setter_single;
+ setter->idx_single = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ /* Double insert header with pointer */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_INSERT;
+ setter->set_double = &hws_action_setter_insert_ptr;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ /* Single remove + Double insert header with pointer */
+ setter = hws_action_setter_find_first(last_setter,
+ ASF_SINGLE1 | ASF_DOUBLE);
+ setter->flags |= ASF_SINGLE1 | ASF_DOUBLE;
+ setter->set_double = &hws_action_setter_insert_ptr;
+ setter->idx_double = i;
+ setter->set_single = &hws_action_setter_common_decap;
+ setter->idx_single = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ /* Double modify header list with remove and push inline */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_MODIFY | ASF_INSERT;
+ setter->set_double = &hws_action_setter_tnl_l3_to_l2;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_TAG:
+ /* Single TAG action, search for any room from the start */
+ setter = hws_action_setter_find_first(start_setter, ASF_SINGLE1);
+ setter->flags |= ASF_SINGLE1;
+ setter->set_single = &hws_action_setter_tag;
+ setter->idx_single = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_CTR:
+			/* Control counter action.
+			 * TODO: The counter is currently executed first. Support is
+			 * needed for a single-action counter that is executed last.
+			 * Example: Decap + CTR
+			 */
+ setter = hws_action_setter_find_first(start_setter, ASF_CTR);
+ setter->flags |= ASF_CTR;
+ setter->set_ctr = &hws_action_setter_ctrl_ctr;
+ setter->idx_ctr = i;
+ break;
+ default:
+ pr_warn("HWS: Invalid action type in processingaction template: action_type[%d]=%d\n",
+ i, action_type[i]);
+ return -EOPNOTSUPP;
+ }
+
+ last_setter = max(setter, last_setter);
+ }
+
+ /* Set default hit on the last STE if no hit action provided */
+ if (!(last_setter->flags & ASF_HIT))
+ last_setter->set_hit = &hws_action_setter_default_hit;
+
+ at->num_of_action_stes = last_setter - start_setter + 1;
+
+ /* Check if action template doesn't require any action DWs */
+ at->only_term = (at->num_of_action_stes == 1) &&
+ !(last_setter->flags & ~(ASF_CTR | ASF_HIT));
+
+ return 0;
+}
+
+struct mlx5hws_action_template *
+mlx5hws_action_template_create(enum mlx5hws_action_type action_type[])
+{
+ struct mlx5hws_action_template *at;
+ u8 num_actions = 0;
+ int i;
+
+ at = kzalloc(sizeof(*at), GFP_KERNEL);
+ if (!at)
+ return NULL;
+
+ while (action_type[num_actions++] != MLX5HWS_ACTION_TYP_LAST)
+ ;
+
+ at->num_actions = num_actions - 1;
+ at->action_type_arr = kcalloc(num_actions, sizeof(*action_type), GFP_KERNEL);
+ if (!at->action_type_arr)
+ goto free_at;
+
+ for (i = 0; i < num_actions; i++)
+ at->action_type_arr[i] = action_type[i];
+
+ return at;
+
+free_at:
+ kfree(at);
+ return NULL;
+}
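+
+/* Usage sketch (illustrative): the action type array must be terminated
+ * by MLX5HWS_ACTION_TYP_LAST, e.g.:
+ *
+ *	enum mlx5hws_action_type types[] = {
+ *		MLX5HWS_ACTION_TYP_CTR,
+ *		MLX5HWS_ACTION_TYP_MODIFY_HDR,
+ *		MLX5HWS_ACTION_TYP_TBL,
+ *		MLX5HWS_ACTION_TYP_LAST,
+ *	};
+ *	struct mlx5hws_action_template *at =
+ *		mlx5hws_action_template_create(types);
+ */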
+
+int mlx5hws_action_template_destroy(struct mlx5hws_action_template *at)
+{
+ kfree(at->action_type_arr);
+ kfree(at);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h
new file mode 100644
index 000000000000..bf5c1b241006
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_ACTION_H_
+#define MLX5HWS_ACTION_H_
+
+/* Max number of STEs needed for a rule (including match) */
+#define MLX5HWS_ACTION_MAX_STE 20
+
+/* Max number of internal subactions of ipv6_ext */
+#define MLX5HWS_ACTION_IPV6_EXT_MAX_SA 4
+
+enum mlx5hws_action_stc_idx {
+ MLX5HWS_ACTION_STC_IDX_CTRL = 0,
+ MLX5HWS_ACTION_STC_IDX_HIT = 1,
+ MLX5HWS_ACTION_STC_IDX_DW5 = 2,
+ MLX5HWS_ACTION_STC_IDX_DW6 = 3,
+ MLX5HWS_ACTION_STC_IDX_DW7 = 4,
+ MLX5HWS_ACTION_STC_IDX_MAX = 5,
+	/* STC Jumbo STE combo: CTR, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE = 1,
+ /* STC combo1: CTR, SINGLE, DOUBLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 = 3,
+ /* STC combo2: CTR, 3 x SINGLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 = 4,
+	/* STC combo3: CTR, TRIPLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO3 = 2,
+};
+
+enum mlx5hws_action_offset {
+ MLX5HWS_ACTION_OFFSET_DW0 = 0,
+ MLX5HWS_ACTION_OFFSET_DW5 = 5,
+ MLX5HWS_ACTION_OFFSET_DW6 = 6,
+ MLX5HWS_ACTION_OFFSET_DW7 = 7,
+ MLX5HWS_ACTION_OFFSET_HIT = 3,
+ MLX5HWS_ACTION_OFFSET_HIT_LSB = 4,
+};
+
+enum {
+ MLX5HWS_ACTION_DOUBLE_SIZE = 8,
+ MLX5HWS_ACTION_INLINE_DATA_SIZE = 4,
+ MLX5HWS_ACTION_HDR_LEN_L2_MACS = 12,
+ MLX5HWS_ACTION_HDR_LEN_L2_VLAN = 4,
+ MLX5HWS_ACTION_HDR_LEN_L2_ETHER = 2,
+ MLX5HWS_ACTION_HDR_LEN_L2 = (MLX5HWS_ACTION_HDR_LEN_L2_MACS +
+ MLX5HWS_ACTION_HDR_LEN_L2_ETHER),
+ MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN = (MLX5HWS_ACTION_HDR_LEN_L2 +
+ MLX5HWS_ACTION_HDR_LEN_L2_VLAN),
+ MLX5HWS_ACTION_REFORMAT_DATA_SIZE = 64,
+ DECAP_L3_NUM_ACTIONS_W_NO_VLAN = 6,
+ DECAP_L3_NUM_ACTIONS_W_VLAN = 7,
+};
+
+enum mlx5hws_action_setter_flag {
+ ASF_SINGLE1 = 1 << 0,
+ ASF_SINGLE2 = 1 << 1,
+ ASF_SINGLE3 = 1 << 2,
+ ASF_DOUBLE = ASF_SINGLE2 | ASF_SINGLE3,
+ ASF_TRIPLE = ASF_SINGLE1 | ASF_DOUBLE,
+ ASF_INSERT = 1 << 3,
+ ASF_REMOVE = 1 << 4,
+ ASF_MODIFY = 1 << 5,
+ ASF_CTR = 1 << 6,
+ ASF_HIT = 1 << 7,
+};
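+
+/* Note that ASF_DOUBLE aliases SINGLE2 | SINGLE3 and ASF_TRIPLE aliases
+ * all three SINGLE slots, so a setter occupied by a double action
+ * naturally conflicts with two single actions when probing for room.
+ */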
+
+struct mlx5hws_action_default_stc {
+ struct mlx5hws_pool_chunk nop_ctr;
+ struct mlx5hws_pool_chunk nop_dw5;
+ struct mlx5hws_pool_chunk nop_dw6;
+ struct mlx5hws_pool_chunk nop_dw7;
+ struct mlx5hws_pool_chunk default_hit;
+ u32 refcount;
+};
+
+struct mlx5hws_action_shared_stc {
+ struct mlx5hws_pool_chunk stc_chunk;
+ u32 refcount;
+};
+
+struct mlx5hws_actions_apply_data {
+ struct mlx5hws_send_engine *queue;
+ struct mlx5hws_rule_action *rule_action;
+ __be32 *wqe_data;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ u32 jump_to_action_stc;
+ struct mlx5hws_context_common_res *common_res;
+ enum mlx5hws_table_type tbl_type;
+ u32 next_direct_idx;
+ u8 require_dep;
+};
+
+struct mlx5hws_actions_wqe_setter;
+
+typedef void (*mlx5hws_action_setter_fp)(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter);
+
+struct mlx5hws_actions_wqe_setter {
+ mlx5hws_action_setter_fp set_single;
+ mlx5hws_action_setter_fp set_double;
+ mlx5hws_action_setter_fp set_triple;
+ mlx5hws_action_setter_fp set_hit;
+ mlx5hws_action_setter_fp set_ctr;
+ u8 idx_single;
+ u8 idx_double;
+ u8 idx_triple;
+ u8 idx_ctr;
+ u8 idx_hit;
+ u8 stage_idx;
+ u8 flags;
+};
+
+struct mlx5hws_action_template {
+ struct mlx5hws_actions_wqe_setter setters[MLX5HWS_ACTION_MAX_STE];
+ enum mlx5hws_action_type *action_type_arr;
+ u8 num_of_action_stes;
+ u8 num_actions;
+ u8 only_term;
+};
+
+struct mlx5hws_action {
+ u8 type;
+ u8 flags;
+ struct mlx5hws_context *ctx;
+ union {
+ struct {
+ struct mlx5hws_pool_chunk stc[MLX5HWS_TABLE_TYPE_MAX];
+ union {
+ struct {
+ u32 pat_id;
+ u32 arg_id;
+ __be64 single_action;
+ u32 nope_locations;
+ u8 num_of_patterns;
+ u8 single_action_type;
+ u8 num_of_actions;
+ u8 max_num_of_actions;
+ u8 require_reparse;
+ } modify_header;
+ struct {
+ u32 arg_id;
+ u32 header_size;
+ u16 max_hdr_sz;
+ u8 num_of_hdrs;
+ u8 anchor;
+ u8 e_anchor;
+ u8 offset;
+ bool encap;
+ u8 require_reparse;
+ } reformat;
+ struct {
+ u32 obj_id;
+ u8 return_reg_id;
+ } aso;
+ struct {
+ u16 vport_num;
+ u16 esw_owner_vhca_id;
+ bool esw_owner_vhca_id_valid;
+ } vport;
+ struct {
+ u32 obj_id;
+ } dest_obj;
+ struct {
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ size_t num_dest;
+ struct mlx5hws_cmd_set_fte_dest *dest_list;
+ } dest_array;
+ struct {
+ u8 type;
+ u8 start_anchor;
+ u8 end_anchor;
+ u8 num_of_words;
+ bool decap;
+ } insert_hdr;
+ struct {
+ /* PRM start anchor from which header will be removed */
+ u8 anchor;
+ /* Header remove offset in bytes, from the start
+ * anchor to the location where remove header starts.
+ */
+ u8 offset;
+ /* Indicates the removed header size in bytes */
+ size_t size;
+ } remove_header;
+ struct {
+ struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_action *hit_ft_action;
+ struct mlx5hws_definer *definer;
+ } range;
+ };
+ };
+
+ struct ibv_flow_action *flow_action;
+ u32 obj_id;
+ struct ibv_qp *qp;
+ };
+};
+
+const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type);
+
+int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx,
+ u8 tbl_type);
+
+void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx,
+ u8 tbl_type);
+
+void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst,
+ u16 num_of_actions);
+
+int mlx5hws_action_template_process(struct mlx5hws_action_template *at);
+
+bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type *user_actions,
+ enum mlx5hws_table_type table_type);
+
+int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc);
+
+void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc);
+
+static inline void
+mlx5hws_action_setter_default_single(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(apply->common_res->default_stc->nop_dw5.offset);
+}
+
+static inline void
+mlx5hws_action_setter_default_double(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] =
+ htonl(apply->common_res->default_stc->nop_dw6.offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] =
+ htonl(apply->common_res->default_stc->nop_dw7.offset);
+}
+
+static inline void
+mlx5hws_action_setter_default_ctr(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] =
+ htonl(apply->common_res->default_stc->nop_ctr.offset);
+}
+
+static inline void
+mlx5hws_action_apply_setter(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter,
+ bool is_jumbo)
+{
+ u8 num_of_actions;
+
+ /* Set control counter */
+ if (setter->set_ctr)
+ setter->set_ctr(apply, setter);
+ else
+ mlx5hws_action_setter_default_ctr(apply, setter);
+
+ if (!is_jumbo) {
+ if (unlikely(setter->set_triple)) {
+ /* Set triple on match */
+ setter->set_triple(apply, setter);
+ num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_COMBO3;
+ } else {
+ /* Set single and double on match */
+ if (setter->set_single)
+ setter->set_single(apply, setter);
+ else
+ mlx5hws_action_setter_default_single(apply, setter);
+
+ if (setter->set_double)
+ setter->set_double(apply, setter);
+ else
+ mlx5hws_action_setter_default_double(apply, setter);
+
+ num_of_actions = setter->set_double ?
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 :
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO2;
+ }
+ } else {
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+ num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE;
+ }
+
+ /* Set next/final hit action */
+ setter->set_hit(apply, setter);
+
+ /* Set number of actions */
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
+ htonl(num_of_actions << 29);
+}
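+
+/* The 3-bit value shifted into bits 31:29 of the control STC index above
+ * is the index of the last used STC entry for the chosen combination
+ * (see the MLX5HWS_ACTION_STC_IDX_LAST_* values), i.e. it encodes how
+ * many of the STE's action slots are meaningful.
+ */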
+
+#endif /* MLX5HWS_ACTION_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c
new file mode 100644
index 000000000000..e6ed66202a40
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+#include "mlx5hws_buddy.h"
+
+static int hws_buddy_init(struct mlx5hws_buddy_mem *buddy, u32 max_order)
+{
+ int i, s, ret = 0;
+
+ buddy->max_order = max_order;
+
+ buddy->bitmap = kcalloc(buddy->max_order + 1,
+ sizeof(*buddy->bitmap),
+ GFP_KERNEL);
+ if (!buddy->bitmap)
+ return -ENOMEM;
+
+ buddy->num_free = kcalloc(buddy->max_order + 1,
+ sizeof(*buddy->num_free),
+ GFP_KERNEL);
+ if (!buddy->num_free) {
+ ret = -ENOMEM;
+ goto err_out_free_bits;
+ }
+
+ for (i = 0; i <= (int)buddy->max_order; ++i) {
+ s = 1 << (buddy->max_order - i);
+
+ buddy->bitmap[i] = bitmap_zalloc(s, GFP_KERNEL);
+ if (!buddy->bitmap[i]) {
+ ret = -ENOMEM;
+ goto err_out_free_num_free;
+ }
+ }
+
+ bitmap_set(buddy->bitmap[buddy->max_order], 0, 1);
+ buddy->num_free[buddy->max_order] = 1;
+
+ return 0;
+
+err_out_free_num_free:
+ for (i = 0; i <= (int)buddy->max_order; ++i)
+ bitmap_free(buddy->bitmap[i]);
+
+ kfree(buddy->num_free);
+
+err_out_free_bits:
+ kfree(buddy->bitmap);
+ return ret;
+}
+
+struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order)
+{
+ struct mlx5hws_buddy_mem *buddy;
+
+ buddy = kzalloc(sizeof(*buddy), GFP_KERNEL);
+ if (!buddy)
+ return NULL;
+
+ if (hws_buddy_init(buddy, max_order))
+ goto free_buddy;
+
+ return buddy;
+
+free_buddy:
+ kfree(buddy);
+ return NULL;
+}
+
+void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy)
+{
+ int i;
+
+ for (i = 0; i <= (int)buddy->max_order; ++i)
+ bitmap_free(buddy->bitmap[i]);
+
+ kfree(buddy->num_free);
+ kfree(buddy->bitmap);
+}
+
+static int hws_buddy_find_free_seg(struct mlx5hws_buddy_mem *buddy,
+ u32 start_order,
+ u32 *segment,
+ u32 *order)
+{
+ unsigned int seg, order_iter, m;
+
+ for (order_iter = start_order;
+ order_iter <= buddy->max_order; ++order_iter) {
+ if (!buddy->num_free[order_iter])
+ continue;
+
+ m = 1 << (buddy->max_order - order_iter);
+ seg = find_first_bit(buddy->bitmap[order_iter], m);
+
+ if (WARN(seg >= m,
+ "ICM Buddy: failed finding free mem for order %d\n",
+ order_iter))
+ return -ENOMEM;
+
+ break;
+ }
+
+ if (order_iter > buddy->max_order)
+ return -ENOMEM;
+
+ *segment = seg;
+ *order = order_iter;
+ return 0;
+}
+
+int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order)
+{
+	u32 seg, order_iter;
+	int err;
+
+ err = hws_buddy_find_free_seg(buddy, order, &seg, &order_iter);
+ if (err)
+ return err;
+
+ bitmap_clear(buddy->bitmap[order_iter], seg, 1);
+ --buddy->num_free[order_iter];
+
+ while (order_iter > order) {
+ --order_iter;
+ seg <<= 1;
+ bitmap_set(buddy->bitmap[order_iter], seg ^ 1, 1);
+ ++buddy->num_free[order_iter];
+ }
+
+ seg <<= order;
+
+ return seg;
+}
+
+void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order)
+{
+ seg >>= order;
+
+ while (test_bit(seg ^ 1, buddy->bitmap[order])) {
+ bitmap_clear(buddy->bitmap[order], seg ^ 1, 1);
+ --buddy->num_free[order];
+ seg >>= 1;
+ ++order;
+ }
+
+ bitmap_set(buddy->bitmap[order], seg, 1);
+ ++buddy->num_free[order];
+}
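+
+/* Worked example of the buddy scheme (illustrative), max_order = 2:
+ * initially only bitmap[2] bit 0 is set. mlx5hws_buddy_alloc_mem(order=0)
+ * takes that order-2 block, splits it twice (marking the buddies free at
+ * order 1 and order 0) and returns segment 0. mlx5hws_buddy_free_mem(0, 0)
+ * then re-merges: the order-0 buddy (bit 1) is free, so both are cleared
+ * and the merge repeats upward until the whole order-2 block is free again.
+ */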
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h
new file mode 100644
index 000000000000..338c44bbedaf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_BUDDY_H_
+#define MLX5HWS_BUDDY_H_
+
+struct mlx5hws_buddy_mem {
+ unsigned long **bitmap;
+ unsigned int *num_free;
+ u32 max_order;
+};
+
+struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order);
+
+void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy);
+
+int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order);
+
+void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order);
+
+#endif /* MLX5HWS_BUDDY_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c
new file mode 100644
index 000000000000..8f3a6f9d703d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c
@@ -0,0 +1,995 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+static u16 hws_bwc_gen_queue_idx(struct mlx5hws_context *ctx)
+{
+ /* assign random queue */
+ return get_random_u8() % mlx5hws_bwc_queues(ctx);
+}
+
+static u16
+hws_bwc_get_burst_th(struct mlx5hws_context *ctx, u16 queue_id)
+{
+ return min(ctx->send_queue[queue_id].num_entries / 2,
+ MLX5HWS_BWC_MATCHER_REHASH_BURST_TH);
+}
+
+static struct mutex *
+hws_bwc_get_queue_lock(struct mlx5hws_context *ctx, u16 idx)
+{
+ return &ctx->bwc_send_queue_locks[idx];
+}
+
+static void hws_bwc_lock_all_queues(struct mlx5hws_context *ctx)
+{
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mutex *queue_lock; /* Protect the queue */
+ int i;
+
+ for (i = 0; i < bwc_queues; i++) {
+ queue_lock = hws_bwc_get_queue_lock(ctx, i);
+ mutex_lock(queue_lock);
+ }
+}
+
+static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx)
+{
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mutex *queue_lock; /* Protect the queue */
+ int i = bwc_queues;
+
+ while (i--) {
+ queue_lock = hws_bwc_get_queue_lock(ctx, i);
+ mutex_unlock(queue_lock);
+ }
+}
+
+static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr,
+ u32 priority,
+ u8 size_log)
+{
+ memset(attr, 0, sizeof(*attr));
+
+ attr->priority = priority;
+ attr->optimize_using_rule_idx = 0;
+ attr->mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE;
+ attr->optimize_flow_src = MLX5HWS_MATCHER_FLOW_SRC_ANY;
+ attr->insert_mode = MLX5HWS_MATCHER_INSERT_BY_HASH;
+ attr->distribute_mode = MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH;
+ attr->rule.num_log = size_log;
+ attr->resizable = true;
+ attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
+}
+
+int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask,
+ enum mlx5hws_action_type action_types[])
+{
+ enum mlx5hws_action_type init_action_types[1] = { MLX5HWS_ACTION_TYP_LAST };
+ struct mlx5hws_context *ctx = table->ctx;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_matcher_attr attr = {0};
+ int i;
+
+ bwc_matcher->rules = kcalloc(bwc_queues, sizeof(*bwc_matcher->rules), GFP_KERNEL);
+ if (!bwc_matcher->rules)
+ goto err;
+
+ for (i = 0; i < bwc_queues; i++)
+ INIT_LIST_HEAD(&bwc_matcher->rules[i]);
+
+ hws_bwc_matcher_init_attr(&attr,
+ priority,
+ MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG);
+
+ bwc_matcher->priority = priority;
+ bwc_matcher->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+
+ /* create dummy action template */
+ bwc_matcher->at[0] =
+ mlx5hws_action_template_create(action_types ?
+ action_types : init_action_types);
+ if (!bwc_matcher->at[0]) {
+ mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n");
+ goto free_bwc_matcher_rules;
+ }
+
+ bwc_matcher->num_of_at = 1;
+
+ bwc_matcher->mt = mlx5hws_match_template_create(ctx,
+ mask->match_buf,
+ mask->match_sz,
+ match_criteria_enable);
+ if (!bwc_matcher->mt) {
+ mlx5hws_err(table->ctx, "BWC matcher: failed creating match template\n");
+ goto free_at;
+ }
+
+ bwc_matcher->matcher = mlx5hws_matcher_create(table,
+ &bwc_matcher->mt, 1,
+ &bwc_matcher->at[0],
+ bwc_matcher->num_of_at,
+ &attr);
+ if (!bwc_matcher->matcher) {
+ mlx5hws_err(table->ctx, "BWC matcher: failed creating HWS matcher\n");
+ goto free_mt;
+ }
+
+ return 0;
+
+free_mt:
+ mlx5hws_match_template_destroy(bwc_matcher->mt);
+free_at:
+ mlx5hws_action_template_destroy(bwc_matcher->at[0]);
+free_bwc_matcher_rules:
+ kfree(bwc_matcher->rules);
+err:
+ return -EINVAL;
+}
+
+struct mlx5hws_bwc_matcher *
+mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher;
+ bool is_complex;
+ int ret;
+
+ if (!mlx5hws_context_bwc_supported(table->ctx)) {
+ mlx5hws_err(table->ctx,
+ "BWC matcher: context created w/o BWC API compatibility\n");
+ return NULL;
+ }
+
+ bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL);
+ if (!bwc_matcher)
+ return NULL;
+
+	/* Check whether the required match params can all be matched
+	 * in a single STE; otherwise a complex matcher is needed.
+	 */
+
+ is_complex = mlx5hws_bwc_match_params_is_complex(table->ctx, match_criteria_enable, mask);
+ if (is_complex)
+ ret = mlx5hws_bwc_matcher_create_complex(bwc_matcher,
+ table,
+ priority,
+ match_criteria_enable,
+ mask);
+ else
+ ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher,
+ table,
+ priority,
+ match_criteria_enable,
+ mask,
+ NULL);
+ if (ret)
+ goto free_bwc_matcher;
+
+ return bwc_matcher;
+
+free_bwc_matcher:
+ kfree(bwc_matcher);
+
+ return NULL;
+}
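+
+/* Usage sketch (illustrative): a typical BWC flow is
+ *
+ *	bwc_matcher = mlx5hws_bwc_matcher_create(table, prio,
+ *						 match_criteria_enable, &mask);
+ *	bwc_rule = mlx5hws_bwc_rule_create(bwc_matcher, &params,
+ *					   flow_source, rule_actions);
+ *	...
+ *	mlx5hws_bwc_rule_destroy(bwc_rule);
+ *	mlx5hws_bwc_matcher_destroy(bwc_matcher);
+ */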
+
+int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ int i;
+
+ mlx5hws_matcher_destroy(bwc_matcher->matcher);
+ bwc_matcher->matcher = NULL;
+
+ for (i = 0; i < bwc_matcher->num_of_at; i++)
+ mlx5hws_action_template_destroy(bwc_matcher->at[i]);
+
+ mlx5hws_match_template_destroy(bwc_matcher->mt);
+ kfree(bwc_matcher->rules);
+
+ return 0;
+}
+
+int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ if (bwc_matcher->num_of_rules)
+ mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
+ "BWC matcher destroy: matcher still has %d rules\n",
+ bwc_matcher->num_of_rules);
+
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+
+ kfree(bwc_matcher);
+ return 0;
+}
+
+static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 *pending_rules,
+ bool drain)
+{
+ struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
+ u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
+ bool got_comp = *pending_rules >= burst_th;
+ bool queue_full;
+ int err = 0;
+ int ret;
+ int i;
+
+ /* Check if there are any completions at all */
+ if (!got_comp && !drain)
+ return 0;
+
+ queue_full = mlx5hws_send_engine_full(&ctx->send_queue[queue_id]);
+ while (queue_full || ((got_comp || drain) && *pending_rules)) {
+ ret = mlx5hws_send_queue_poll(ctx, queue_id, comp, burst_th);
+ if (unlikely(ret < 0)) {
+ mlx5hws_err(ctx, "BWC poll error: polling queue %d returned %d\n",
+ queue_id, ret);
+ return -EINVAL;
+ }
+
+ if (ret) {
+ (*pending_rules) -= ret;
+ for (i = 0; i < ret; i++) {
+ if (unlikely(comp[i].status != MLX5HWS_FLOW_OP_SUCCESS)) {
+ mlx5hws_err(ctx,
+ "BWC poll error: polling queue %d returned completion with error\n",
+ queue_id);
+ err = -EINVAL;
+ }
+ }
+ queue_full = false;
+ }
+
+ got_comp = !!ret;
+ }
+
+ return err;
+}
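+
+/* Note on the poll helper above: with drain == false it polls
+ * opportunistically, only once a burst's worth of rules is in flight;
+ * with drain == true it keeps polling until all pending rules have
+ * completed.
+ */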
+
+void
+mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
+ u16 bwc_queue_idx,
+ u32 flow_source,
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+
+ /* no use of INSERT_BY_INDEX in bwc rule */
+ rule_attr->rule_idx = 0;
+
+ /* notify HW at each rule insertion/deletion */
+ rule_attr->burst = 0;
+
+ /* We don't need user data, but the API requires it to exist */
+ rule_attr->user_data = (void *)0xFACADE;
+
+ rule_attr->queue_id = mlx5hws_bwc_get_queue_id(ctx, bwc_queue_idx);
+ rule_attr->flow_source = flow_source;
+}
+
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_bwc_rule *bwc_rule;
+
+ bwc_rule = kzalloc(sizeof(*bwc_rule), GFP_KERNEL);
+ if (unlikely(!bwc_rule))
+ goto out_err;
+
+ bwc_rule->rule = kzalloc(sizeof(*bwc_rule->rule), GFP_KERNEL);
+ if (unlikely(!bwc_rule->rule))
+ goto free_rule;
+
+ bwc_rule->bwc_matcher = bwc_matcher;
+ return bwc_rule;
+
+free_rule:
+ kfree(bwc_rule);
+out_err:
+ return NULL;
+}
+
+void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule)
+{
+	kfree(bwc_rule->rule);
+ kfree(bwc_rule);
+}
+
+static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+ bwc_matcher->num_of_rules++;
+ bwc_rule->bwc_queue_idx = idx;
+ list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
+}
+
+static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+ bwc_matcher->num_of_rules--;
+ list_del_init(&bwc_rule->list_node);
+}
+
+static int
+hws_bwc_rule_destroy_hws_async(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ return mlx5hws_rule_destroy(bwc_rule->rule, attr);
+}
+
+static int
+hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_flow_op_result completion;
+ int ret;
+
+ ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr);
+ if (unlikely(ret))
+ return ret;
+
+ do {
+ ret = mlx5hws_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1);
+ } while (ret != 1);
+
+ if (unlikely(completion.status != MLX5HWS_FLOW_OP_SUCCESS ||
+ (bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
+ bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING))) {
+ mlx5hws_err(ctx, "Failed destroying BWC rule: completion %d, rule status %d\n",
+ completion.status, bwc_rule->rule->status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u16 idx = bwc_rule->bwc_queue_idx;
+ struct mlx5hws_rule_attr attr;
+ struct mutex *queue_lock; /* Protect the queue */
+ int ret;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &attr);
+
+ queue_lock = hws_bwc_get_queue_lock(ctx, idx);
+
+ mutex_lock(queue_lock);
+
+ ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr);
+ hws_bwc_rule_list_remove(bwc_rule);
+
+ mutex_unlock(queue_lock);
+
+ return ret;
+}
+
+int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ int ret;
+
+ ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+
+ mlx5hws_bwc_rule_free(bwc_rule);
+ return ret;
+}
+
+static int
+hws_bwc_rule_create_async(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ return mlx5hws_rule_create(bwc_rule->bwc_matcher->matcher,
+ 0, /* only one match template supported */
+ match_param,
+ at_idx,
+ rule_actions,
+ rule_attr,
+ bwc_rule->rule);
+}
+
+static int
+hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *rule_attr)
+
+{
+ struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+ u32 expected_completions = 1;
+ int ret;
+
+ ret = hws_bwc_rule_create_async(bwc_rule, match_param,
+ at_idx, rule_actions,
+ rule_attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+
+ return ret;
+}
+
+static int
+hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u32 expected_completions = 1;
+ int ret;
+
+ ret = mlx5hws_rule_action_update(bwc_rule->rule,
+ at_idx,
+ rule_actions,
+ rule_attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+ if (unlikely(ret))
+ mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret);
+
+ return ret;
+}
+
+static bool
+hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
+
+ return bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH >=
+ caps->ste_alloc_log_max - 1;
+}
+
+static bool
+hws_bwc_matcher_rehash_size_needed(struct mlx5hws_bwc_matcher *bwc_matcher,
+ u32 num_of_rules)
+{
+ if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher)))
+ return false;
+
+ if (unlikely((num_of_rules * 100 / MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH) >=
+ (1UL << bwc_matcher->size_log)))
+ return true;
+
+ return false;
+}
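+
+/* E.g. if MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH were 50 and size_log
+ * were 10 (1024 rule slots), the check above fires once num_of_rules
+ * reaches 512, i.e. the matcher is grown at 50% utilization.
+ */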
+
+static void
+hws_bwc_rule_actions_to_action_types(struct mlx5hws_rule_action rule_actions[],
+ enum mlx5hws_action_type action_types[])
+{
+ int i = 0;
+
+ for (i = 0;
+ rule_actions[i].action && (rule_actions[i].action->type != MLX5HWS_ACTION_TYP_LAST);
+ i++) {
+ action_types[i] = (enum mlx5hws_action_type)rule_actions[i].action->type;
+ }
+
+ action_types[i] = MLX5HWS_ACTION_TYP_LAST;
+}
+
+static int
+hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ enum mlx5hws_action_type action_types[MLX5HWS_BWC_MAX_ACTS];
+
+ hws_bwc_rule_actions_to_action_types(rule_actions, action_types);
+
+ bwc_matcher->at[bwc_matcher->num_of_at] =
+ mlx5hws_action_template_create(action_types);
+
+ if (unlikely(!bwc_matcher->at[bwc_matcher->num_of_at]))
+ return -ENOMEM;
+
+ bwc_matcher->num_of_at++;
+ return 0;
+}
+
+static int
+hws_bwc_matcher_extend_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher))) {
+ mlx5hws_err(ctx, "Can't resize matcher: depth exceeds limit %d\n",
+ caps->rtc_log_depth_max);
+ return -ENOMEM;
+ }
+
+ bwc_matcher->size_log =
+ min(bwc_matcher->size_log + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
+ caps->ste_alloc_log_max - MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH);
+
+ return 0;
+}
+
+static int
+hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ enum mlx5hws_action_type *action_type_arr;
+ int i, j;
+
+ /* start from index 1 - first action template is a dummy */
+ for (i = 1; i < bwc_matcher->num_of_at; i++) {
+ j = 0;
+ action_type_arr = bwc_matcher->at[i]->action_type_arr;
+
+ while (rule_actions[j].action &&
+ rule_actions[j].action->type != MLX5HWS_ACTION_TYP_LAST) {
+ if (action_type_arr[j] != rule_actions[j].action->type)
+ break;
+ j++;
+ }
+
+ if (action_type_arr[j] == MLX5HWS_ACTION_TYP_LAST &&
+ (!rule_actions[j].action ||
+ rule_actions[j].action->type == MLX5HWS_ACTION_TYP_LAST))
+ return i;
+ }
+
+ return -1;
+}
+
+static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_bwc_rule **bwc_rules;
+ struct mlx5hws_rule_attr rule_attr;
+ u32 *pending_rules;
+ int i, j, ret = 0;
+ bool all_done;
+ u16 burst_th;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
+
+ pending_rules = kcalloc(bwc_queues, sizeof(*pending_rules), GFP_KERNEL);
+ if (!pending_rules)
+ return -ENOMEM;
+
+ bwc_rules = kcalloc(bwc_queues, sizeof(*bwc_rules), GFP_KERNEL);
+ if (!bwc_rules) {
+ ret = -ENOMEM;
+ goto free_pending_rules;
+ }
+
+ for (i = 0; i < bwc_queues; i++) {
+ if (list_empty(&bwc_matcher->rules[i]))
+ bwc_rules[i] = NULL;
+ else
+ bwc_rules[i] = list_first_entry(&bwc_matcher->rules[i],
+ struct mlx5hws_bwc_rule,
+ list_node);
+ }
+
+ do {
+ all_done = true;
+
+ for (i = 0; i < bwc_queues; i++) {
+ rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+ burst_th = hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
+
+ for (j = 0; j < burst_th && bwc_rules[i]; j++) {
+ rule_attr.burst = !!((j + 1) % burst_th);
+ ret = mlx5hws_matcher_resize_rule_move(bwc_matcher->matcher,
+ bwc_rules[i]->rule,
+ &rule_attr);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule failed during rehash (%d)\n",
+ ret);
+ goto free_bwc_rules;
+ }
+
+ all_done = false;
+ pending_rules[i]++;
+ bwc_rules[i] = list_is_last(&bwc_rules[i]->list_node,
+ &bwc_matcher->rules[i]) ?
+ NULL : list_next_entry(bwc_rules[i], list_node);
+
+ ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id,
+ &pending_rules[i], false);
+ if (unlikely(ret))
+ goto free_bwc_rules;
+ }
+ }
+ } while (!all_done);
+
+ /* drain all the bwc queues */
+ for (i = 0; i < bwc_queues; i++) {
+ if (pending_rules[i]) {
+ u16 queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+
+ mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]);
+ ret = hws_bwc_queue_poll(ctx, queue_id,
+ &pending_rules[i], true);
+ if (unlikely(ret))
+ goto free_bwc_rules;
+ }
+ }
+
+free_bwc_rules:
+ kfree(bwc_rules);
+free_pending_rules:
+ kfree(pending_rules);
+
+ return ret;
+}
+
+static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ return hws_bwc_matcher_move_all_simple(bwc_matcher);
+}
+
+static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher_attr matcher_attr = {0};
+ struct mlx5hws_matcher *old_matcher;
+ struct mlx5hws_matcher *new_matcher;
+ int ret;
+
+ hws_bwc_matcher_init_attr(&matcher_attr,
+ bwc_matcher->priority,
+ bwc_matcher->size_log);
+
+ old_matcher = bwc_matcher->matcher;
+ new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
+ &bwc_matcher->mt, 1,
+ bwc_matcher->at,
+ bwc_matcher->num_of_at,
+ &matcher_attr);
+ if (!new_matcher) {
+ mlx5hws_err(ctx, "Rehash error: matcher creation failed\n");
+ return -ENOMEM;
+ }
+
+ ret = mlx5hws_matcher_resize_set_target(old_matcher, new_matcher);
+ if (ret) {
+ mlx5hws_err(ctx, "Rehash error: failed setting resize target\n");
+ return ret;
+ }
+
+ ret = hws_bwc_matcher_move_all(bwc_matcher);
+ if (ret) {
+ mlx5hws_err(ctx, "Rehash error: moving rules failed\n");
+ return -ENOMEM;
+ }
+
+ bwc_matcher->matcher = new_matcher;
+ mlx5hws_matcher_destroy(old_matcher);
+
+ return 0;
+}
+
+static int
+hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ int ret;
+
+	/* If the matcher is already at its maximum size, we can't
+	 * do the rehash. Skip it and try adding the rule again -
+	 * perhaps something has changed in the meantime.
+	 */
+ if (hws_bwc_matcher_size_maxed_out(bwc_matcher))
+ return 0;
+
+	/* It is possible that another rule has already performed the rehash.
+	 * Check again whether rehash is really needed: if the trigger was
+	 * size but that is no longer the case - skip the rehash.
+	 */
+ if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, bwc_matcher->num_of_rules))
+ return 0;
+
+ /* Now we're done all the checking - do the rehash:
+ * - extend match RTC size
+ * - create new matcher
+ * - move all the rules to the new matcher
+ * - destroy the old matcher
+ */
+
+ ret = hws_bwc_matcher_extend_size(bwc_matcher);
+ if (ret)
+ return ret;
+
+ return hws_bwc_matcher_move(bwc_matcher);
+}
+
+static int
+hws_bwc_matcher_rehash_at(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ /* Rehash by action template doesn't require any additional checking.
+ * The bwc_matcher already contains the new action template.
+ * Just do the usual rehash:
+ * - create new matcher
+ * - move all the rules to the new matcher
+ * - destroy the old matcher
+ */
+ return hws_bwc_matcher_move(bwc_matcher);
+}
+
+int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ struct mlx5hws_rule_action rule_actions[],
+ u32 flow_source,
+ u16 bwc_queue_idx)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_rule_attr rule_attr;
+ struct mutex *queue_lock; /* Protect the queue */
+ u32 num_of_rules;
+ int ret = 0;
+ int at_idx;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, bwc_queue_idx, flow_source, &rule_attr);
+
+ queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+
+ mutex_lock(queue_lock);
+
+ /* check if rehash needed due to missing action template */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (unlikely(at_idx < 0)) {
+ /* we need to extend BWC matcher action templates array */
+ mutex_unlock(queue_lock);
+ hws_bwc_lock_all_queues(ctx);
+
+ ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+ if (unlikely(ret)) {
+ hws_bwc_unlock_all_queues(ctx);
+ return ret;
+ }
+
+ /* action templates array was extended, we need the last idx */
+ at_idx = bwc_matcher->num_of_at - 1;
+
+ ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+ bwc_matcher->at[at_idx]);
+ if (unlikely(ret)) {
+ /* Action template attach failed, possibly due to
+ * requiring more action STEs.
+ * Need to attempt creating new matcher with all
+ * the action templates, including the new one.
+ */
+ ret = hws_bwc_matcher_rehash_at(bwc_matcher);
+ if (unlikely(ret)) {
+ mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
+ bwc_matcher->at[at_idx] = NULL;
+ bwc_matcher->num_of_at--;
+
+ hws_bwc_unlock_all_queues(ctx);
+
+ mlx5hws_err(ctx,
+ "BWC rule insertion: rehash AT failed (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ hws_bwc_unlock_all_queues(ctx);
+ mutex_lock(queue_lock);
+ }
+
+ /* check if the number of rules requires a rehash */
+ num_of_rules = bwc_matcher->num_of_rules;
+
+ if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
+ mutex_unlock(queue_lock);
+
+ hws_bwc_lock_all_queues(ctx);
+ ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+ hws_bwc_unlock_all_queues(ctx);
+
+ if (ret) {
+ mlx5hws_err(ctx, "BWC rule insertion: rehash size [%d -> %d] failed (%d)\n",
+ bwc_matcher->size_log - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
+ bwc_matcher->size_log,
+ ret);
+ return ret;
+ }
+
+ mutex_lock(queue_lock);
+ }
+
+ ret = hws_bwc_rule_create_sync(bwc_rule,
+ match_param,
+ at_idx,
+ rule_actions,
+ &rule_attr);
+ if (likely(!ret)) {
+ hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+ return 0; /* rule inserted successfully */
+ }
+
+ /* At this point the rule wasn't added.
+ * It could be because of a collision, or some other problem.
+ * Without diving below the API level, all we know is that the
+ * completion came back with an error status.
+ * Try rehash by size and insert the rule again - last chance.
+ */
+
+ mutex_unlock(queue_lock);
+
+ hws_bwc_lock_all_queues(ctx);
+ ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+ hws_bwc_unlock_all_queues(ctx);
+
+ if (ret) {
+ mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* Rehash done, but we still have that pesky rule to add */
+ mutex_lock(queue_lock);
+
+ ret = hws_bwc_rule_create_sync(bwc_rule,
+ match_param,
+ at_idx,
+ rule_actions,
+ &rule_attr);
+
+ if (unlikely(ret)) {
+ mutex_unlock(queue_lock);
+ mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret);
+ return ret;
+ }
+
+ hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+
+ return 0;
+}
+
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_bwc_rule *bwc_rule;
+ u16 bwc_queue_idx;
+ int ret;
+
+ if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
+ mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
+ return NULL;
+ }
+
+ bwc_rule = mlx5hws_bwc_rule_alloc(bwc_matcher);
+ if (unlikely(!bwc_rule))
+ return NULL;
+
+ bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
+
+ ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
+ params->match_buf,
+ rule_actions,
+ flow_source,
+ bwc_queue_idx);
+ if (unlikely(ret)) {
+ mlx5hws_bwc_rule_free(bwc_rule);
+ return NULL;
+ }
+
+ return bwc_rule;
+}
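A hedged usage sketch of this entry point. The matcher setup and the contents of match_buf/actions are assumed to be prepared by the caller; only mlx5hws_bwc_rule_create() and struct mlx5hws_match_parameters are taken from this patch:

/* Illustrative only: insert a single rule through the BWC API,
 * with flow_source left at 0 and error handling reduced to a
 * NULL check (the exact cause is logged by the callee).
 */
static int example_insert_rule(struct mlx5hws_bwc_matcher *matcher,
			       u32 *match_buf, u32 match_sz,
			       struct mlx5hws_rule_action *actions)
{
	struct mlx5hws_match_parameters params = {
		.match_buf = match_buf,
		.match_sz = match_sz,
	};
	struct mlx5hws_bwc_rule *rule;

	rule = mlx5hws_bwc_rule_create(matcher, &params, 0, actions);
	if (!rule)
		return -EINVAL;

	return 0;
}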
+
+static int
+hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_rule_attr rule_attr;
+ struct mutex *queue_lock; /* Protect the queue */
+ int at_idx, ret;
+ u16 idx;
+
+ idx = bwc_rule->bwc_queue_idx;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &rule_attr);
+ queue_lock = hws_bwc_get_queue_lock(ctx, idx);
+
+ mutex_lock(queue_lock);
+
+ /* check if rehash needed due to missing action template */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (unlikely(at_idx < 0)) {
+ /* we need to extend BWC matcher action templates array */
+ mutex_unlock(queue_lock);
+ hws_bwc_lock_all_queues(ctx);
+
+ /* check again - perhaps another thread already did extend_at */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (likely(at_idx < 0)) {
+ ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+ if (unlikely(ret)) {
+ hws_bwc_unlock_all_queues(ctx);
+ mlx5hws_err(ctx, "BWC rule update: failed extending AT (%d)\n", ret);
+ return ret;
+ }
+
+ /* action templates array was extended, we need the last idx */
+ at_idx = bwc_matcher->num_of_at - 1;
+
+ ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+ bwc_matcher->at[at_idx]);
+ if (unlikely(ret)) {
+ /* Action template attach failed, possibly due to
+ * requiring more action STEs.
+ * Need to attempt creating new matcher with all
+ * the action templates, including the new one.
+ */
+ ret = hws_bwc_matcher_rehash_at(bwc_matcher);
+ if (unlikely(ret)) {
+ mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
+ bwc_matcher->at[at_idx] = NULL;
+ bwc_matcher->num_of_at--;
+
+ hws_bwc_unlock_all_queues(ctx);
+
+ mlx5hws_err(ctx,
+ "BWC rule update: rehash AT failed (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ hws_bwc_unlock_all_queues(ctx);
+ mutex_lock(queue_lock);
+ }
+
+ ret = hws_bwc_rule_update_sync(bwc_rule,
+ at_idx,
+ rule_actions,
+ &rule_attr);
+ mutex_unlock(queue_lock);
+
+ if (unlikely(ret))
+ mlx5hws_err(ctx, "BWC rule: update failed (%d)\n", ret);
+
+ return ret;
+}
+
+int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+
+ if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
+ mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
+ return -EINVAL;
+ }
+
+ return hws_bwc_rule_action_update(bwc_rule, rule_actions);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h
new file mode 100644
index 000000000000..4fe8c32d8fbe
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_BWC_H_
+#define MLX5HWS_BWC_H_
+
+#define MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG 1
+#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
+#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70
+#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32
+#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 255
+
+#define MLX5HWS_BWC_MAX_ACTS 16
+
+struct mlx5hws_bwc_matcher {
+ struct mlx5hws_matcher *matcher;
+ struct mlx5hws_match_template *mt;
+ struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
+ u8 num_of_at;
+ u16 priority;
+ u8 size_log;
+ u32 num_of_rules; /* atomically accessed */
+ struct list_head *rules;
+};
+
+struct mlx5hws_bwc_rule {
+ struct mlx5hws_bwc_matcher *bwc_matcher;
+ struct mlx5hws_rule *rule;
+ u16 bwc_queue_idx;
+ struct list_head list_node;
+};
+
+int
+mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask,
+ enum mlx5hws_action_type action_types[]);
+
+int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+struct mlx5hws_bwc_rule *mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule);
+
+int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ struct mlx5hws_rule_action rule_actions[],
+ u32 flow_source,
+ u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule);
+
+void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
+ u16 bwc_queue_idx,
+ u32 flow_source,
+ struct mlx5hws_rule_attr *rule_attr);
+
+static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
+{
+ /* Besides the control queue, half of the queues are
+ * regular HWS queues, and the other half are BWC queues.
+ */
+ return (ctx->queues - 1) / 2;
+}
+
+static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
+{
+ return idx + mlx5hws_bwc_queues(ctx);
+}
+
+#endif /* MLX5HWS_BWC_H_ */
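To make the queue-index mapping in the two inline helpers above concrete, a worked example (the total queue count is an arbitrary assumption):

/* Sanity sketch: with a hypothetical 9-queue context, one queue is
 * the control queue, mlx5hws_bwc_queues() yields (9 - 1) / 2 == 4,
 * and BWC indices 0..3 map to HWS queue ids 4..7, leaving ids 0..3
 * as the regular HWS queues.
 */
static void example_bwc_queue_mapping(void)
{
	u16 queues = 9;
	u16 bwc_queues = (queues - 1) / 2;
	u16 idx;

	for (idx = 0; idx < bwc_queues; idx++)
		pr_info("BWC idx %u -> HWS queue id %u\n",
			idx, idx + bwc_queues);
}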
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
new file mode 100644
index 000000000000..601fad5fc54a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_definer match_layout = {0};
+ struct mlx5hws_match_template *mt;
+ bool is_complex = false;
+ int ret;
+
+ if (!match_criteria_enable)
+ return false; /* empty matcher */
+
+ mt = mlx5hws_match_template_create(ctx,
+ mask->match_buf,
+ mask->match_sz,
+ match_criteria_enable);
+ if (!mt) {
+ mlx5hws_err(ctx, "BWC: failed creating match template\n");
+ return false;
+ }
+
+ ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+ if (ret) {
+ /* The only case that we're interested in is -E2BIG,
+ * which means that the match parameters need to be
+ * split into a complex matcher.
+ * For all other cases (good or bad) - just return false
+ * and let the usual matcher creation path handle it.
+ */
+ if (ret == -E2BIG) {
+ is_complex = true;
+ mlx5hws_dbg(ctx, "Matcher definer layout: need complex matcher\n");
+ } else {
+ mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
+ }
+ }
+
+ mlx5hws_match_template_destroy(mt);
+
+ return is_complex;
+}
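A short sketch of how this predicate is presumably consumed at matcher-creation time. The dispatcher below is hypothetical; only the predicate and the two *_create_* entry points exist in this patch, and passing NULL action_types to the simple path is an assumption:

/* Hypothetical dispatcher between the simple and complex paths. */
static int example_bwc_matcher_create(struct mlx5hws_bwc_matcher *bwc_matcher,
				      struct mlx5hws_table *table,
				      u32 priority,
				      u8 match_criteria_enable,
				      struct mlx5hws_match_parameters *mask)
{
	if (mlx5hws_bwc_match_params_is_complex(table->ctx,
						match_criteria_enable, mask))
		return mlx5hws_bwc_matcher_create_complex(bwc_matcher, table,
							  priority,
							  match_criteria_enable,
							  mask);

	return mlx5hws_bwc_matcher_create_simple(bwc_matcher, table, priority,
						 match_criteria_enable, mask,
						 NULL);
}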
+
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ mlx5hws_err(table->ctx, "Complex matcher is not supported yet\n");
+ return -EOPNOTSUPP;
+}
+
+void
+mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ /* nothing to do here */
+}
+
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx)
+{
+ mlx5hws_err(bwc_rule->bwc_matcher->matcher->tbl->ctx,
+ "Complex rule is not supported yet\n");
+ return -EOPNOTSUPP;
+}
+
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ return 0;
+}
+
+int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
+ "Moving complex rule is not supported yet\n");
+ return -EOPNOTSUPP;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h
new file mode 100644
index 000000000000..068ee8118609
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_BWC_COMPLEX_H_
+#define MLX5HWS_BWC_COMPLEX_H_
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask);
+
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask);
+
+void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule);
+
+#endif /* MLX5HWS_BWC_COMPLEX_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c
new file mode 100644
index 000000000000..2c7b14172049
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c
@@ -0,0 +1,1300 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+static enum mlx5_ifc_flow_destination_type
+hws_cmd_dest_type_to_ifc_dest_type(enum mlx5_flow_destination_type type)
+{
+ switch (type) {
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
+ case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ case MLX5_FLOW_DESTINATION_TYPE_NONE:
+ case MLX5_FLOW_DESTINATION_TYPE_PORT:
+ case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+ default:
+ pr_warn("HWS: unknown flow dest type %d\n", type);
+ return 0;
+ }
+}
+
+static int hws_cmd_general_obj_destroy(struct mlx5_core_dev *mdev,
+ u32 object_type,
+ u32 object_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, object_type);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, object_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ u32 *table_id)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
+ void *ft_ctx;
+ int ret;
+
+ MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
+ MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
+
+ ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
+ MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
+ MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
+ MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en);
+ MLX5_SET(flow_table_context, ft_ctx, decap_en, ft_attr->decap_en);
+
+ ret = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
+ if (ret)
+ return ret;
+
+ *table_id = MLX5_GET(create_flow_table_out, out, table_id);
+
+ return 0;
+}
+
+int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr,
+ u32 table_id)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
+ void *ft_ctx;
+
+ MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE);
+ MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type);
+ MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs);
+ MLX5_SET(modify_flow_table_in, in, table_id, table_id);
+
+ ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context);
+
+ MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action);
+ MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id);
+ MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_0, ft_attr->rtc_id_0);
+ MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_1, ft_attr->rtc_id_1);
+
+ return mlx5_cmd_exec_in(mdev, modify_flow_table, in);
+}
+
+int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
+ u32 table_id,
+ struct mlx5hws_cmd_ft_query_attr *ft_attr,
+ u64 *icm_addr_0, u64 *icm_addr_1)
+{
+ u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {0};
+ void *ft_ctx;
+ int ret;
+
+ MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE);
+ MLX5_SET(query_flow_table_in, in, table_type, ft_attr->type);
+ MLX5_SET(query_flow_table_in, in, table_id, table_id);
+
+ ret = mlx5_cmd_exec_inout(mdev, query_flow_table, in, out);
+ if (ret)
+ return ret;
+
+ ft_ctx = MLX5_ADDR_OF(query_flow_table_out, out, flow_table_context);
+ *icm_addr_0 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_0);
+ *icm_addr_1 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_1);
+
+ return ret;
+}
+
+int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u8 fw_ft_type, u32 table_id)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
+
+ MLX5_SET(destroy_flow_table_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+ MLX5_SET(destroy_flow_table_in, in, table_type, fw_ft_type);
+ MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
+
+ return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
+}
+
+void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u32 table_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_FT_ALIAS, table_id);
+}
+
+static int hws_cmd_flow_group_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_fg_attr *fg_attr,
+ u32 *group_id)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ u32 *in;
+ int ret;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
+ MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type);
+ MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id);
+
+ ret = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
+ if (ret)
+ goto out;
+
+ *group_id = MLX5_GET(create_flow_group_out, out, group_id);
+
+out:
+ kvfree(in);
+ return ret;
+}
+
+static int hws_cmd_flow_group_destroy(struct mlx5_core_dev *mdev,
+ u32 ft_id, u32 fg_id, u8 ft_type)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
+
+ MLX5_SET(destroy_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ MLX5_SET(destroy_flow_group_in, in, table_type, ft_type);
+ MLX5_SET(destroy_flow_group_in, in, table_id, ft_id);
+ MLX5_SET(destroy_flow_group_in, in, group_id, fg_id);
+
+ return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
+}
+
+int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr)
+{
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
+ void *in_flow_context;
+ u32 dest_entry_sz;
+ u32 total_dest_sz;
+ u32 action_flags;
+ u8 *in_dests;
+ u32 inlen;
+ u32 *in;
+ int ret;
+ u32 i;
+
+ dest_entry_sz = fte_attr->extended_dest ?
+ MLX5_ST_SZ_BYTES(extended_dest_format) :
+ MLX5_ST_SZ_BYTES(dest_format);
+ total_dest_sz = dest_entry_sz * fte_attr->dests_num;
+ inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+ MLX5_SET(set_fte_in, in, table_type, table_type);
+ MLX5_SET(set_fte_in, in, table_id, table_id);
+
+ in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+ MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+ MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
+
+ action_flags = fte_attr->action_flags;
+ MLX5_SET(flow_context, in_flow_context, action, action_flags);
+
+ if (action_flags & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ MLX5_SET(flow_context, in_flow_context,
+ packet_reformat_id, fte_attr->packet_reformat_id);
+ }
+
+ if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) {
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_type, fte_attr->encrypt_decrypt_type);
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id);
+ }
+
+ if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ in_dests = (u8 *)MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+
+ for (i = 0; i < fte_attr->dests_num; i++) {
+ struct mlx5hws_cmd_set_fte_dest *dest = &fte_attr->dests[i];
+ enum mlx5_ifc_flow_destination_type ifc_dest_type =
+ hws_cmd_dest_type_to_ifc_dest_type(dest->destination_type);
+
+ switch (dest->destination_type) {
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) {
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id_valid, 1);
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id,
+ dest->esw_owner_vhca_id);
+ }
+ fallthrough;
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ MLX5_SET(dest_format, in_dests, destination_type, ifc_dest_type);
+ MLX5_SET(dest_format, in_dests, destination_id,
+ dest->destination_id);
+ if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_REFORMAT) {
+ MLX5_SET(dest_format, in_dests, packet_reformat, 1);
+ MLX5_SET(extended_dest_format, in_dests, packet_reformat_id,
+ dest->ext_reformat_id);
+ }
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ in_dests = in_dests + dest_entry_sz;
+ }
+ MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num);
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed creating FLOW_TABLE_ENTRY\n");
+
+out:
+ kfree(in);
+ return ret;
+}
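A worked example of the input-length arithmetic at the top of this function. The concrete byte sizes are placeholders, and DW_SIZE is assumed to be 4 bytes; only the shape of the computation mirrors the code above:

/* Illustrative only: the command buffer is the base set_fte_in layout
 * plus one dest_format (or extended_dest_format) entry per destination,
 * rounded up to a dword multiple.
 */
static u32 example_set_fte_inlen(u32 base_sz, u32 dest_entry_sz,
				 u32 dests_num)
{
	u32 total_dest_sz = dest_entry_sz * dests_num;

	return roundup(base_sz + total_dest_sz, 4);
}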
+
+int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
+
+ MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+ MLX5_SET(delete_fte_in, in, table_type, table_type);
+ MLX5_SET(delete_fte_in, in, table_id, table_id);
+
+ return mlx5_cmd_exec_in(mdev, delete_fte, in);
+}
+
+struct mlx5hws_cmd_forward_tbl *
+mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr)
+{
+ struct mlx5hws_cmd_fg_attr fg_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *tbl;
+ int ret;
+
+ tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ return NULL;
+
+ ret = mlx5hws_cmd_flow_table_create(mdev, ft_attr, &tbl->ft_id);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create FT\n");
+ goto free_tbl;
+ }
+
+ fg_attr.table_id = tbl->ft_id;
+ fg_attr.table_type = ft_attr->type;
+
+ ret = hws_cmd_flow_group_create(mdev, &fg_attr, &tbl->fg_id);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create FG\n");
+ goto free_ft;
+ }
+
+ ret = mlx5hws_cmd_set_fte(mdev, ft_attr->type,
+ tbl->ft_id, tbl->fg_id, fte_attr);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create FTE\n");
+ goto free_fg;
+ }
+
+ tbl->type = ft_attr->type;
+ return tbl;
+
+free_fg:
+ hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, ft_attr->type);
+free_ft:
+ mlx5hws_cmd_flow_table_destroy(mdev, ft_attr->type, tbl->ft_id);
+free_tbl:
+ kfree(tbl);
+ return NULL;
+}
+
+void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_forward_tbl *tbl)
+{
+ mlx5hws_cmd_delete_fte(mdev, tbl->type, tbl->ft_id);
+ hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, tbl->type);
+ mlx5hws_cmd_flow_table_destroy(mdev, tbl->type, tbl->ft_id);
+ kfree(tbl);
+}
+
+void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
+ u32 fw_ft_type,
+ enum mlx5hws_table_type type,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr)
+{
+ u32 default_miss_tbl;
+
+ if (type != MLX5HWS_TABLE_TYPE_FDB)
+ return;
+
+ ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+ ft_attr->type = fw_ft_type;
+ ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
+
+ default_miss_tbl = ctx->common_res[type].default_miss->ft_id;
+ if (!default_miss_tbl) {
+ pr_warn("HWS: no flow table ID for default miss\n");
+ return;
+ }
+
+ ft_attr->table_miss_id = default_miss_tbl;
+}
+
+int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+ u32 *rtc_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_rtc_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_rtc_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_RTC);
+
+ attr = MLX5_ADDR_OF(create_rtc_in, in, rtc);
+ MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ?
+ MLX5_IFC_RTC_STE_FORMAT_11DW :
+ MLX5_IFC_RTC_STE_FORMAT_8DW);
+
+ if (rtc_attr->is_scnd_range) {
+ MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE);
+ MLX5_SET(rtc, attr, num_match_ste, 2);
+ }
+
+ MLX5_SET(rtc, attr, pd, rtc_attr->pd);
+ MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe);
+ MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode);
+ MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode);
+ MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
+ MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth);
+ MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size);
+ MLX5_SET(rtc, attr, table_type, rtc_attr->table_type);
+ MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0);
+ MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1);
+ MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
+ MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
+ MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
+ MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
+ MLX5_SET(rtc, attr, reparse_mode, rtc_attr->reparse_mode);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create RTC\n");
+ goto out;
+ }
+
+ *rtc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_RTC, rtc_id);
+}
+
+int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_stc_create_attr *stc_attr,
+ u32 *stc_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_STC);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, op_param.create.log_obj_range, stc_attr->log_obj_range);
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, stc);
+ MLX5_SET(stc, attr, table_type, stc_attr->table_type);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create STC\n");
+ goto out;
+ }
+
+ *stc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STC, stc_id);
+}
+
+static int
+hws_cmd_stc_modify_set_stc_param(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ void *stc_param)
+{
+ switch (stc_attr->action_type) {
+ case MLX5_IFC_STC_ACTION_TYPE_COUNTER:
+ MLX5_SET(stc_ste_param_flow_counter, stc_param, flow_counter_id, stc_attr->id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR:
+ MLX5_SET(stc_ste_param_tir, stc_param, tirn, stc_attr->dest_tir_num);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT:
+ MLX5_SET(stc_ste_param_table, stc_param, table_id, stc_attr->dest_table_id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST:
+ MLX5_SET(stc_ste_param_header_modify_list, stc_param,
+ header_modify_pattern_id, stc_attr->modify_header.pattern_id);
+ MLX5_SET(stc_ste_param_header_modify_list, stc_param,
+ header_modify_argument_id, stc_attr->modify_header.arg_id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE:
+ MLX5_SET(stc_ste_param_remove, stc_param, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE);
+ MLX5_SET(stc_ste_param_remove, stc_param, decap,
+ stc_attr->remove_header.decap);
+ MLX5_SET(stc_ste_param_remove, stc_param, remove_start_anchor,
+ stc_attr->remove_header.start_anchor);
+ MLX5_SET(stc_ste_param_remove, stc_param, remove_end_anchor,
+ stc_attr->remove_header.end_anchor);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT:
+ MLX5_SET(stc_ste_param_insert, stc_param, action_type,
+ MLX5_MODIFICATION_TYPE_INSERT);
+ MLX5_SET(stc_ste_param_insert, stc_param, encap,
+ stc_attr->insert_header.encap);
+ MLX5_SET(stc_ste_param_insert, stc_param, inline_data,
+ stc_attr->insert_header.is_inline);
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_anchor,
+ stc_attr->insert_header.insert_anchor);
+ /* HW gets the next 2 sizes in words */
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_size,
+ stc_attr->insert_header.header_size / W_SIZE);
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_offset,
+ stc_attr->insert_header.insert_offset / W_SIZE);
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_argument,
+ stc_attr->insert_header.arg_id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_COPY:
+ case MLX5_IFC_STC_ACTION_TYPE_SET:
+ case MLX5_IFC_STC_ACTION_TYPE_ADD:
+ case MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD:
+ *(__be64 *)stc_param = stc_attr->modify_action.data;
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK:
+ MLX5_SET(stc_ste_param_vport, stc_param, vport_number,
+ stc_attr->vport.vport_num);
+ MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id,
+ stc_attr->vport.esw_owner_vhca_id);
+ MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id_valid,
+ stc_attr->vport.eswitch_owner_vhca_id_valid);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_DROP:
+ case MLX5_IFC_STC_ACTION_TYPE_NOP:
+ case MLX5_IFC_STC_ACTION_TYPE_TAG:
+ case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_ASO:
+ MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_object_id,
+ stc_attr->aso.devx_obj_id);
+ MLX5_SET(stc_ste_param_execute_aso, stc_param, return_reg_id,
+ stc_attr->aso.return_reg_id);
+ MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_type,
+ stc_attr->aso.aso_type);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
+ MLX5_SET(stc_ste_param_ste_table, stc_param, ste_obj_id,
+ stc_attr->ste_table.ste_obj_id);
+ MLX5_SET(stc_ste_param_ste_table, stc_param, match_definer_id,
+ stc_attr->ste_table.match_definer_id);
+ MLX5_SET(stc_ste_param_ste_table, stc_param, log_hash_size,
+ stc_attr->ste_table.log_hash_size);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS:
+ MLX5_SET(stc_ste_param_remove_words, stc_param, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
+ MLX5_SET(stc_ste_param_remove_words, stc_param, remove_start_anchor,
+ stc_attr->remove_words.start_anchor);
+ MLX5_SET(stc_ste_param_remove_words, stc_param,
+ remove_size, stc_attr->remove_words.num_of_words);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION:
+ MLX5_SET(stc_ste_param_ipsec_encrypt, stc_param, ipsec_object_id,
+ stc_attr->id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION:
+ MLX5_SET(stc_ste_param_ipsec_decrypt, stc_param, ipsec_object_id,
+ stc_attr->id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_TRAILER:
+ MLX5_SET(stc_ste_param_trailer, stc_param, command,
+ stc_attr->reformat_trailer.op);
+ MLX5_SET(stc_ste_param_trailer, stc_param, type,
+ stc_attr->reformat_trailer.type);
+ MLX5_SET(stc_ste_param_trailer, stc_param, length,
+ stc_attr->reformat_trailer.size);
+ break;
+ default:
+ mlx5_core_err(mdev, "Not supported type %d\n", stc_attr->action_type);
+ return -EINVAL;
+ }
+ return 0;
+}
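A quick worked example for the insert-header branch above, where the byte sizes are converted to words before being handed to HW. W_SIZE is not defined in this hunk; the example assumes it is 2 (16-bit words):

/* Illustrative only: a 14-byte inline header inserted at offset 0
 * becomes insert_size == 7 and insert_offset == 0 in word units.
 */
static void example_insert_header_words(void)
{
	u32 header_size = 14;	/* bytes, e.g. an Ethernet header */
	u32 insert_offset = 0;	/* bytes */

	pr_info("insert_size=%u insert_offset=%u (words)\n",
		header_size / 2, insert_offset / 2);
}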
+
+int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev,
+ u32 stc_id,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
+ void *stc_param;
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_STC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, stc_id);
+ MLX5_SET(general_obj_in_cmd_hdr, in,
+ op_param.query.obj_offset, stc_attr->stc_offset);
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, stc);
+ MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset);
+ MLX5_SET(stc, attr, action_type, stc_attr->action_type);
+ MLX5_SET(stc, attr, reparse_mode, stc_attr->reparse_mode);
+ MLX5_SET64(stc, attr, modify_field_select,
+ MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC);
+
+ /* Set destination TIRN, TAG, FT ID, STE ID */
+ stc_param = MLX5_ADDR_OF(stc, attr, stc_param);
+ ret = hws_cmd_stc_modify_set_stc_param(mdev, stc_attr, stc_param);
+ if (ret)
+ return ret;
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to modify STC FW action_type %d\n",
+ stc_attr->action_type);
+
+ return ret;
+}
+
+int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
+ u16 log_obj_range,
+ u32 pd,
+ u32 *arg_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, op_param.create.log_obj_range, log_obj_range);
+
+ attr = MLX5_ADDR_OF(create_arg_in, in, arg);
+ MLX5_SET(arg, attr, access_pd, pd);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create ARG\n");
+ goto out;
+ }
+
+ *arg_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev,
+ u32 arg_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT, arg_id);
+}
+
+int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev,
+ u32 pattern_length,
+ u8 *actions,
+ u32 *ptrn_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ int num_of_actions;
+ u64 *pattern_data;
+ void *pattern;
+ void *attr;
+ int ret;
+ int i;
+
+ if (pattern_length > MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY) {
+ mlx5_core_err(mdev, "Pattern length %d exceeds limit %d\n",
+ pattern_length, MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY);
+ return -EINVAL;
+ }
+
+ attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN);
+
+ pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern);
+ /* Pattern_length is in ddwords */
+ MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE));
+
+ pattern_data = (u64 *)MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data);
+ memcpy(pattern_data, actions, pattern_length);
+
+ num_of_actions = pattern_length / MLX5HWS_MODIFY_ACTION_SIZE;
+ for (i = 0; i < num_of_actions; i++) {
+ int type;
+
+ type = MLX5_GET(set_action_in, &pattern_data[i], action_type);
+ if (type != MLX5_MODIFICATION_TYPE_COPY &&
+ type != MLX5_MODIFICATION_TYPE_ADD_FIELD)
+ /* Copy and add-field actions use all bytes for control */
+ MLX5_SET(set_action_in, &pattern_data[i], data, 0);
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create header_modify_pattern\n");
+ goto out;
+ }
+
+ *ptrn_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
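A worked example of the two size conversions in this function, assuming MLX5HWS_MODIFY_ACTION_SIZE is 8 bytes (one double-dword per modify-header action) and DW_SIZE is 4:

/* Illustrative only: a pattern of 4 modify-header actions. */
static void example_pattern_sizes(void)
{
	u32 pattern_length = 4 * 8;	/* bytes */

	/* pattern_length field counts ddwords: 32 / (2 * 4) == 4 */
	pr_info("pattern_length field = %u ddwords, num_of_actions = %u\n",
		pattern_length / (2 * 4), pattern_length / 8);
}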
+
+void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev,
+ u32 ptrn_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN, ptrn_id);
+}
+
+int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ste_create_attr *ste_attr,
+ u32 *ste_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_ste_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_ste_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_STE);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, op_param.create.log_obj_range, ste_attr->log_obj_range);
+
+ attr = MLX5_ADDR_OF(create_ste_in, in, ste);
+ MLX5_SET(ste, attr, table_type, ste_attr->table_type);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create STE\n");
+ goto out;
+ }
+
+ *ste_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STE, ste_id);
+}
+
+int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_definer_create_attr *def_attr,
+ u32 *definer_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_definer_in)] = {0};
+ void *ptr;
+ int ret;
+
+ MLX5_SET(general_obj_in_cmd_hdr,
+ in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
+
+ ptr = MLX5_ADDR_OF(create_definer_in, in, definer);
+ MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT);
+
+ MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]);
+ MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]);
+ MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]);
+ MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]);
+ MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]);
+ MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]);
+ MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]);
+ MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]);
+ MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]);
+
+ MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]);
+ MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]);
+ MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]);
+ MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]);
+ MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]);
+ MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]);
+ MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]);
+ MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]);
+
+ ptr = MLX5_ADDR_OF(definer, ptr, match_mask);
+ memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask));
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create Definer\n");
+ goto out;
+ }
+
+ *definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev,
+ u32 definer_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MATCH_DEFINER, definer_id);
+}
+
+int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_packet_reformat_create_attr *attr,
+ u32 *reformat_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
+ size_t insz, cmd_data_sz, cmd_total_sz;
+ void *prctx;
+ void *pdata;
+ void *in;
+ int ret;
+
+ cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
+ cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in);
+ cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data);
+ insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE);
+ in = kzalloc(insz, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+
+ prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
+ packet_reformat_context);
+ pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
+
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz);
+ memcpy(pdata, attr->data, attr->data_sz);
+
+ ret = mlx5_cmd_exec(mdev, in, insz, out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create packet reformat\n");
+ goto out;
+ }
+
+ *reformat_id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
+out:
+ kfree(in);
+ return ret;
+}
+
+int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
+ u32 reformat_id)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_in)] = {0};
+ int ret;
+
+ MLX5_SET(dealloc_packet_reformat_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_SET(dealloc_packet_reformat_in, in,
+ packet_reformat_id, reformat_id);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to destroy packet_reformat\n");
+
+ return ret;
+}
+
+int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
+ void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ int ret;
+
+ MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+ MLX5_SET(modify_sq_in, in, sqn, sqn);
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to modify SQ\n");
+
+ return ret;
+}
+
+int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_allow_other_vhca_access_attr *attr)
+{
+ u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
+ void *key;
+ int ret;
+
+ MLX5_SET(allow_other_vhca_access_in,
+ in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
+ MLX5_SET(allow_other_vhca_access_in,
+ in, object_type_to_be_accessed, attr->obj_type);
+ MLX5_SET(allow_other_vhca_access_in,
+ in, object_id_to_be_accessed, attr->obj_id);
+
+ key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
+ memcpy(key, attr->access_key, sizeof(attr->access_key));
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command\n");
+
+ return ret;
+}
+
+int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
+ u32 *obj_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
+ void *attr;
+ void *key;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, alias_attr->obj_type);
+ MLX5_SET(general_obj_in_cmd_hdr, attr, op_param.create.alias_object, 1);
+
+ attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
+ MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
+ MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
+
+ key = MLX5_ADDR_OF(alias_context, attr, access_key);
+ memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create ALIAS OBJ\n");
+ goto out;
+ }
+
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
+ u16 obj_type,
+ u32 obj_id)
+{
+ return hws_cmd_general_obj_destroy(mdev, obj_type, obj_id);
+}
+
+int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_generate_wqe_attr *attr,
+ struct mlx5_cqe64 *ret_cqe)
+{
+ u32 out[MLX5_ST_SZ_DW(generate_wqe_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(generate_wqe_in)] = {0};
+ u8 status;
+ void *ptr;
+ int ret;
+
+ MLX5_SET(generate_wqe_in, in, opcode, MLX5_CMD_OP_GENERATE_WQE);
+ MLX5_SET(generate_wqe_in, in, pdn, attr->pdn);
+
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_ctrl);
+ memcpy(ptr, attr->wqe_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_ctrl));
+
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_ctrl);
+ memcpy(ptr, attr->gta_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_ctrl));
+
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_0);
+ memcpy(ptr, attr->gta_data_0, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_0));
+
+ if (attr->gta_data_1) {
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_1);
+ memcpy(ptr, attr->gta_data_1, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_1));
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to write GTA WQE using FW\n");
+ return ret;
+ }
+
+ status = MLX5_GET(generate_wqe_out, out, status);
+ if (status) {
+ mlx5_core_err(mdev, "Invalid FW CQE status %d\n", status);
+ return -EINVAL;
+ }
+
+ ptr = MLX5_ADDR_OF(generate_wqe_out, out, cqe_data);
+ memcpy(ret_cqe, ptr, sizeof(*ret_cqe));
+
+ return ret;
+}
+
+int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_query_caps *caps)
+{
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
+ u32 out_size;
+ u32 *out;
+ int ret;
+
+ out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ out = kzalloc(out_size, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query device caps\n");
+ goto out;
+ }
+
+ caps->wqe_based_update =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.wqe_based_flow_table_update_cap);
+
+ caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.eswitch_manager);
+
+ caps->flex_protocols = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_protocols);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
+ caps->flex_parser_id_geneve_tlv_option_0 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_id_geneve_tlv_option_0);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
+ caps->flex_parser_id_mpls_over_gre =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_gre);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
+ caps->flex_parser_id_mpls_over_udp =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_udp_label);
+
+ caps->log_header_modify_argument_granularity =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.log_header_modify_argument_granularity);
+
+ caps->log_header_modify_argument_granularity -=
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.log_header_modify_argument_granularity_offset);
+
+ caps->log_header_modify_argument_max_alloc =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.log_header_modify_argument_max_alloc);
+
+ caps->definer_format_sup =
+ MLX5_GET64(query_hca_cap_out, out,
+ capability.cmd_hca_cap.match_definer_format_supported);
+
+ caps->vhca_id = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.vhca_id);
+
+ caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.sq_ts_format);
+
+ caps->ipsec_offload = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.ipsec_offload);
+
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query device caps 2\n");
+ goto out;
+ }
+
+ caps->full_dw_jumbo_support =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_8_6_ext);
+
+ caps->format_select_gtpu_dw_0 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_0);
+
+ caps->format_select_gtpu_dw_1 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_1);
+
+ caps->format_select_gtpu_dw_2 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_2);
+
+ caps->format_select_gtpu_ext_dw_0 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_first_ext_dw_0);
+
+ caps->supp_type_gen_wqe =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.generate_wqe_type);
+
+ caps->flow_table_hash_type =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.flow_table_hash_type);
+
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query flow table caps\n");
+ goto out;
+ }
+
+ caps->nic_ft.max_level =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level);
+
+ caps->nic_ft.reparse =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse);
+
+ caps->nic_ft.ignore_flow_level_rtc_valid =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.ignore_flow_level_rtc_valid);
+
+ caps->flex_parser_ok_bits_supp =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);
+
+ if (caps->wqe_based_update) {
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query WQE based FT caps\n");
+ goto out;
+ }
+
+ caps->rtc_reparse_mode =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_reparse_mode);
+
+ caps->ste_format =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_format);
+
+ caps->rtc_index_mode =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_index_mode);
+
+ caps->rtc_log_depth_max =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_log_depth_max);
+
+ caps->ste_alloc_log_max =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_alloc_log_max);
+
+ caps->ste_alloc_log_gran =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_alloc_log_granularity);
+
+ caps->trivial_match_definer =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.trivial_match_definer);
+
+ caps->stc_alloc_log_max =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.stc_alloc_log_max);
+
+ caps->stc_alloc_log_gran =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.stc_alloc_log_granularity);
+
+ caps->rtc_hash_split_table =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_hash_split_table);
+
+ caps->rtc_linear_lookup_table =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_linear_lookup_table);
+
+ caps->access_index_mode =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.access_index_mode);
+
+ caps->linear_match_definer =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.linear_match_definer_reg_c3);
+
+ caps->rtc_max_hash_def_gen_wqe =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_max_num_hash_definer_gen_wqe);
+
+ caps->supp_ste_format_gen_wqe =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_format_gen_wqe);
+
+ caps->fdb_tir_stc =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.fdb_jump_to_tir_stc);
+ }
+
+ if (caps->eswitch_manager) {
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query flow table esw caps\n");
+ goto out;
+ }
+
+ caps->fdb_ft.max_level =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level);
+
+ caps->fdb_ft.reparse =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse);
+
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_ESW | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query eswitch capabilities\n");
+ goto out;
+ }
+
+ if (MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.esw_manager_vport_number_valid))
+ caps->eswitch_manager_vport_number =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.esw_manager_vport_number);
+
+ caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.merged_eswitch);
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query device attributes\n");
+ goto out;
+ }
+
+ snprintf(caps->fw_ver, sizeof(caps->fw_ver), "%d.%d.%d",
+ fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+
+ caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
+
+out:
+ kfree(out);
+ return ret;
+}
+
+int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
+ u16 vport_number, u16 *gvmi)
+{
+ bool ec_vf_func = other_function ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+ int out_size;
+ void *out;
+ int err;
+
+ out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ out = kzalloc(out_size, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, other_function, other_function);
+ MLX5_SET(query_hca_cap_in, in, function_id,
+ mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
+ MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 | HCA_CAP_OPMOD_GET_CUR);
+
+ err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+ if (err) {
+ kfree(out);
+ return err;
+ }
+
+ *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
+
+ kfree(out);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h
new file mode 100644
index 000000000000..2fbcf4ff571a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_CMD_H_
+#define MLX5HWS_CMD_H_
+
+#define WIRE_PORT 0xFFFF
+
+#define ACCESS_KEY_LEN 32
+
+enum mlx5hws_cmd_ext_dest_flags {
+ MLX5HWS_CMD_EXT_DEST_REFORMAT = 1 << 0,
+ MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID = 1 << 1,
+};
+
+struct mlx5hws_cmd_set_fte_dest {
+ u8 destination_type;
+ u32 destination_id;
+ enum mlx5hws_cmd_ext_dest_flags ext_flags;
+ u32 ext_reformat_id;
+ u16 esw_owner_vhca_id;
+};
+
+struct mlx5hws_cmd_set_fte_attr {
+ u32 action_flags;
+ bool ignore_flow_level;
+ u8 flow_source;
+ u8 extended_dest;
+ u8 encrypt_decrypt_type;
+ u32 encrypt_decrypt_obj_id;
+ u32 packet_reformat_id;
+ u32 dests_num;
+ struct mlx5hws_cmd_set_fte_dest *dests;
+};
+
+struct mlx5hws_cmd_ft_create_attr {
+ u8 type;
+ u8 level;
+ bool rtc_valid;
+ bool decap_en;
+ bool reformat_en;
+};
+
+struct mlx5hws_cmd_ft_modify_attr {
+ u8 type;
+ u32 rtc_id_0;
+ u32 rtc_id_1;
+ u32 table_miss_id;
+ u8 table_miss_action;
+ u64 modify_fs;
+};
+
+struct mlx5hws_cmd_ft_query_attr {
+ u8 type;
+};
+
+struct mlx5hws_cmd_fg_attr {
+ u32 table_id;
+ u32 table_type;
+};
+
+struct mlx5hws_cmd_forward_tbl {
+ u8 type;
+ u32 ft_id;
+ u32 fg_id;
+ u32 refcount;
+};
+
+struct mlx5hws_cmd_rtc_create_attr {
+ u32 pd;
+ u32 stc_base;
+ u32 ste_base;
+ u32 ste_offset;
+ u32 miss_ft_id;
+ bool fw_gen_wqe;
+ u8 update_index_mode;
+ u8 access_index_mode;
+ u8 num_hash_definer;
+ u8 log_depth;
+ u8 log_size;
+ u8 table_type;
+ u8 match_definer_0;
+ u8 match_definer_1;
+ u8 reparse_mode;
+ bool is_frst_jumbo;
+ bool is_scnd_range;
+};
+
+struct mlx5hws_cmd_alias_obj_create_attr {
+ u32 obj_id;
+ u16 vhca_id;
+ u16 obj_type;
+ u8 access_key[ACCESS_KEY_LEN];
+};
+
+struct mlx5hws_cmd_stc_create_attr {
+ u8 log_obj_range;
+ u8 table_type;
+};
+
+struct mlx5hws_cmd_stc_modify_attr {
+ u32 stc_offset;
+ u8 action_offset;
+ u8 reparse_mode;
+ enum mlx5_ifc_stc_action_type action_type;
+ union {
+ u32 id; /* TIRN, TAG, FT ID, STE ID, CRYPTO */
+ struct {
+ u8 decap;
+ u16 start_anchor;
+ u16 end_anchor;
+ } remove_header;
+ struct {
+ u32 arg_id;
+ u32 pattern_id;
+ } modify_header;
+ struct {
+ __be64 data;
+ } modify_action;
+ struct {
+ u32 arg_id;
+ u32 header_size;
+ u8 is_inline;
+ u8 encap;
+ u16 insert_anchor;
+ u16 insert_offset;
+ } insert_header;
+ struct {
+ u8 aso_type;
+ u32 devx_obj_id;
+ u8 return_reg_id;
+ } aso;
+ struct {
+ u16 vport_num;
+ u16 esw_owner_vhca_id;
+ u8 eswitch_owner_vhca_id_valid;
+ } vport;
+ struct {
+ struct mlx5hws_pool_chunk ste;
+ struct mlx5hws_pool *ste_pool;
+ u32 ste_obj_id; /* Internal */
+ u32 match_definer_id;
+ u8 log_hash_size;
+ bool ignore_tx;
+ } ste_table;
+ struct {
+ u16 start_anchor;
+ u16 num_of_words;
+ } remove_words;
+ struct {
+ u8 type;
+ u8 op;
+ u8 size;
+ } reformat_trailer;
+
+ u32 dest_table_id;
+ u32 dest_tir_num;
+ };
+};
+
+struct mlx5hws_cmd_ste_create_attr {
+ u8 log_obj_range;
+ u8 table_type;
+};
+
+struct mlx5hws_cmd_definer_create_attr {
+ u8 *dw_selector;
+ u8 *byte_selector;
+ u8 *match_mask;
+};
+
+struct mlx5hws_cmd_allow_other_vhca_access_attr {
+ u16 obj_type;
+ u32 obj_id;
+ u8 access_key[ACCESS_KEY_LEN];
+};
+
+struct mlx5hws_cmd_packet_reformat_create_attr {
+ u8 type;
+ size_t data_sz;
+ void *data;
+ u8 reformat_param_0;
+};
+
+struct mlx5hws_cmd_query_ft_caps {
+ u8 max_level;
+ u8 reparse;
+ u8 ignore_flow_level_rtc_valid;
+};
+
+struct mlx5hws_cmd_generate_wqe_attr {
+ u8 *wqe_ctrl;
+ u8 *gta_ctrl;
+ u8 *gta_data_0;
+ u8 *gta_data_1;
+ u32 pdn;
+};
+
+struct mlx5hws_cmd_query_caps {
+ u32 flex_protocols;
+ u8 wqe_based_update;
+ u8 rtc_reparse_mode;
+ u16 ste_format;
+ u8 rtc_index_mode;
+ u8 ste_alloc_log_max;
+ u8 ste_alloc_log_gran;
+ u8 stc_alloc_log_max;
+ u8 stc_alloc_log_gran;
+ u8 rtc_log_depth_max;
+ u8 format_select_gtpu_dw_0;
+ u8 format_select_gtpu_dw_1;
+ u8 flow_table_hash_type;
+ u8 format_select_gtpu_dw_2;
+ u8 format_select_gtpu_ext_dw_0;
+ u8 access_index_mode;
+ u32 linear_match_definer;
+ bool full_dw_jumbo_support;
+ bool rtc_hash_split_table;
+ bool rtc_linear_lookup_table;
+ u32 supp_type_gen_wqe;
+ u8 rtc_max_hash_def_gen_wqe;
+ u16 supp_ste_format_gen_wqe;
+ struct mlx5hws_cmd_query_ft_caps nic_ft;
+ struct mlx5hws_cmd_query_ft_caps fdb_ft;
+ bool eswitch_manager;
+ bool merged_eswitch;
+ u32 eswitch_manager_vport_number;
+ u8 log_header_modify_argument_granularity;
+ u8 log_header_modify_argument_max_alloc;
+ u8 sq_ts_format;
+ u8 fdb_tir_stc;
+ u64 definer_format_sup;
+ u32 trivial_match_definer;
+ u32 vhca_id;
+ u32 shared_vhca_id;
+ char fw_ver[64];
+ bool ipsec_offload;
+ bool is_ecpf;
+ u8 flex_parser_ok_bits_supp;
+ u8 flex_parser_id_geneve_tlv_option_0;
+ u8 flex_parser_id_mpls_over_gre;
+ u8 flex_parser_id_mpls_over_udp;
+};
+
+int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ u32 *table_id);
+
+int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr,
+ u32 table_id);
+
+int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
+ u32 obj_id,
+ struct mlx5hws_cmd_ft_query_attr *ft_attr,
+ u64 *icm_addr_0, u64 *icm_addr_1);
+
+int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u8 fw_ft_type, u32 table_id);
+
+void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u32 table_id);
+
+int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+ u32 *rtc_id);
+
+void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id);
+
+int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_stc_create_attr *stc_attr,
+ u32 *stc_id);
+
+int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev,
+ u32 stc_id,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr);
+
+void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id);
+
+int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_generate_wqe_attr *attr,
+ struct mlx5_cqe64 *ret_cqe);
+
+int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ste_create_attr *ste_attr,
+ u32 *ste_id);
+
+void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id);
+
+int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_definer_create_attr *def_attr,
+ u32 *definer_id);
+
+void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev,
+ u32 definer_id);
+
+int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
+ u16 log_obj_range,
+ u32 pd,
+ u32 *arg_id);
+
+void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev,
+ u32 arg_id);
+
+int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev,
+ u32 pattern_length,
+ u8 *actions,
+ u32 *ptrn_id);
+
+void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev,
+ u32 ptrn_id);
+
+int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_packet_reformat_create_attr *attr,
+ u32 *reformat_id);
+
+int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
+ u32 reformat_id);
+
+int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr);
+
+int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev,
+ u32 table_type, u32 table_id);
+
+struct mlx5hws_cmd_forward_tbl *
+mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr);
+
+void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_forward_tbl *tbl);
+
+int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
+ u32 *obj_id);
+
+int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
+ u16 obj_type,
+ u32 obj_id);
+
+int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn);
+
+int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_query_caps *caps);
+
+void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
+ u32 fw_ft_type,
+ enum mlx5hws_table_type type,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr);
+
+int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_allow_other_vhca_access_attr *attr);
+
+int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
+ u16 vport_number, u16 *gvmi);
+
+#endif /* MLX5HWS_CMD_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c
new file mode 100644
index 000000000000..00e4fdf4a558
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved. */
+
+#include "mlx5hws_internal.h"
+
+bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx)
+{
+ return IS_BIT_SET(ctx->caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_BY_STC);
+}
+
+u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx)
+{
+ /* Prefer dynamic reparse, which reparses only specific actions */
+ if (mlx5hws_context_cap_dynamic_reparse(ctx))
+ return MLX5_IFC_RTC_REPARSE_NEVER;
+
+ /* Otherwise fall back to the less efficient static reparse */
+ return MLX5_IFC_RTC_REPARSE_ALWAYS;
+}
+
+static int hws_context_pools_init(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_pool_attr pool_attr = {0};
+ u8 max_log_sz;
+ int ret;
+ int i;
+
+ ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache);
+ if (ret)
+ return ret;
+
+ ret = mlx5hws_definer_init_cache(&ctx->definer_cache);
+ if (ret)
+ goto uninit_pat_cache;
+
+ /* Create an STC pool per FT type */
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC;
+ pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STC_POOL;
+ max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
+ pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);
+
+ for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
+ pool_attr.table_type = i;
+ ctx->stc_pool[i] = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!ctx->stc_pool[i]) {
+ mlx5hws_err(ctx, "Failed to allocate STC pool [%d]", i);
+ ret = -ENOMEM;
+ goto free_stc_pools;
+ }
+ }
+
+ return 0;
+
+free_stc_pools:
+ for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++)
+ if (ctx->stc_pool[i])
+ mlx5hws_pool_destroy(ctx->stc_pool[i]);
+
+ mlx5hws_definer_uninit_cache(ctx->definer_cache);
+uninit_pat_cache:
+ mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
+ return ret;
+}
+
+static void hws_context_pools_uninit(struct mlx5hws_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
+ if (ctx->stc_pool[i])
+ mlx5hws_pool_destroy(ctx->stc_pool[i]);
+ }
+
+ mlx5hws_definer_uninit_cache(ctx->definer_cache);
+ mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
+}
+
+static int hws_context_init_pd(struct mlx5hws_context *ctx)
+{
+ int ret = 0;
+
+ ret = mlx5_core_alloc_pd(ctx->mdev, &ctx->pd_num);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate PD\n");
+ return ret;
+ }
+
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_PRIVATE_PD;
+
+ return 0;
+}
+
+static int hws_context_uninit_pd(struct mlx5hws_context *ctx)
+{
+ if (ctx->flags & MLX5HWS_CONTEXT_FLAG_PRIVATE_PD)
+ mlx5_core_dealloc_pd(ctx->mdev, ctx->pd_num);
+
+ return 0;
+}
+
+static void hws_context_check_hws_supp(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ /* HWS not supported on device / FW */
+ if (!caps->wqe_based_update) {
+ mlx5hws_err(ctx, "Required HWS WQE based insertion cap not supported\n");
+ return;
+ }
+
+ if (!caps->eswitch_manager) {
+ mlx5hws_err(ctx, "HWS is not supported for non eswitch manager port\n");
+ return;
+ }
+
+ /* Current solution requires all rules to set the reparse bit */
+ if ((!caps->nic_ft.reparse ||
+ (!caps->fdb_ft.reparse && caps->eswitch_manager)) ||
+ !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) {
+ mlx5hws_err(ctx, "Required HWS reparse cap not supported\n");
+ return;
+ }
+
+ /* FW/HW must support 8DW STE */
+ if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
+ mlx5hws_err(ctx, "Required HWS STE format not supported\n");
+ return;
+ }
+
+ /* Both hash-based and offset-based rule insertion are required */
+ if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) ||
+ !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) {
+ mlx5hws_err(ctx, "Required HWS RTC update mode not supported\n");
+ return;
+ }
+
+ /* Support for SELECT definer ID is required */
+ if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) {
+ mlx5hws_err(ctx, "Required HWS Dynamic definer not supported\n");
+ return;
+ }
+
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT;
+}
+
+static int hws_context_init_hws(struct mlx5hws_context *ctx,
+ struct mlx5hws_context_attr *attr)
+{
+ int ret;
+
+ hws_context_check_hws_supp(ctx);
+
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
+ return 0;
+
+ ret = hws_context_init_pd(ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_context_pools_init(ctx);
+ if (ret)
+ goto uninit_pd;
+
+ if (attr->bwc)
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+
+ ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
+ if (ret)
+ goto pools_uninit;
+
+ INIT_LIST_HEAD(&ctx->tbl_list);
+
+ return 0;
+
+pools_uninit:
+ hws_context_pools_uninit(ctx);
+uninit_pd:
+ hws_context_uninit_pd(ctx);
+ return ret;
+}
+
+static void hws_context_uninit_hws(struct mlx5hws_context *ctx)
+{
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
+ return;
+
+ mlx5hws_send_queues_close(ctx);
+ hws_context_pools_uninit(ctx);
+ hws_context_uninit_pd(ctx);
+}
+
+struct mlx5hws_context *mlx5hws_context_open(struct mlx5_core_dev *mdev,
+ struct mlx5hws_context_attr *attr)
+{
+ struct mlx5hws_context *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ ctx->mdev = mdev;
+
+ mutex_init(&ctx->ctrl_lock);
+ xa_init(&ctx->peer_ctx_xa);
+
+ ctx->caps = kzalloc(sizeof(*ctx->caps), GFP_KERNEL);
+ if (!ctx->caps)
+ goto free_ctx;
+
+ ret = mlx5hws_cmd_query_caps(mdev, ctx->caps);
+ if (ret)
+ goto free_caps;
+
+ ret = mlx5hws_vport_init_vports(ctx);
+ if (ret)
+ goto free_caps;
+
+ ret = hws_context_init_hws(ctx, attr);
+ if (ret)
+ goto uninit_vports;
+
+ mlx5hws_debug_init_dump(ctx);
+
+ return ctx;
+
+uninit_vports:
+ mlx5hws_vport_uninit_vports(ctx);
+free_caps:
+ kfree(ctx->caps);
+free_ctx:
+ xa_destroy(&ctx->peer_ctx_xa);
+ mutex_destroy(&ctx->ctrl_lock);
+ kfree(ctx);
+ return NULL;
+}
+
+int mlx5hws_context_close(struct mlx5hws_context *ctx)
+{
+ mlx5hws_debug_uninit_dump(ctx);
+ hws_context_uninit_hws(ctx);
+ mlx5hws_vport_uninit_vports(ctx);
+ kfree(ctx->caps);
+ xa_destroy(&ctx->peer_ctx_xa);
+ mutex_destroy(&ctx->ctrl_lock);
+ kfree(ctx);
+ return 0;
+}
+
+void mlx5hws_context_set_peer(struct mlx5hws_context *ctx,
+ struct mlx5hws_context *peer_ctx,
+ u16 peer_vhca_id)
+{
+ mutex_lock(&ctx->ctrl_lock);
+
+ if (xa_err(xa_store(&ctx->peer_ctx_xa, peer_vhca_id, peer_ctx, GFP_KERNEL)))
+ pr_warn("HWS: failed storing peer vhca ID in peer xarray\n");
+
+ mutex_unlock(&ctx->ctrl_lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
new file mode 100644
index 000000000000..8ab548aa402b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_CONTEXT_H_
+#define MLX5HWS_CONTEXT_H_
+
+enum mlx5hws_context_flags {
+ MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0,
+ MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1,
+ MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2,
+};
+
+enum mlx5hws_context_shared_stc_type {
+ MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3 = 0,
+ MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP = 1,
+ MLX5HWS_CONTEXT_SHARED_STC_MAX = 2,
+};
+
+struct mlx5hws_context_common_res {
+ struct mlx5hws_action_default_stc *default_stc;
+ struct mlx5hws_action_shared_stc *shared_stc[MLX5HWS_CONTEXT_SHARED_STC_MAX];
+ struct mlx5hws_cmd_forward_tbl *default_miss;
+};
+
+struct mlx5hws_context_debug_info {
+ struct dentry *steering_debugfs;
+ struct dentry *fdb_debugfs;
+};
+
+struct mlx5hws_context_vports {
+ u16 esw_manager_gvmi;
+ u16 uplink_gvmi;
+ struct xarray vport_gvmi_xa;
+};
+
+struct mlx5hws_context {
+ struct mlx5_core_dev *mdev;
+ struct mlx5hws_cmd_query_caps *caps;
+ u32 pd_num;
+ struct mlx5hws_pool *stc_pool[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_context_common_res common_res[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_pattern_cache *pattern_cache;
+ struct mlx5hws_definer_cache *definer_cache;
+ struct mutex ctrl_lock; /* control lock to protect the whole context */
+ enum mlx5hws_context_flags flags;
+ struct mlx5hws_send_engine *send_queue;
+ size_t queues;
+ struct mutex *bwc_send_queue_locks; /* protect BWC queues */
+ struct lock_class_key *bwc_lock_class_keys;
+ struct list_head tbl_list;
+ struct mlx5hws_context_debug_info debug_info;
+ struct xarray peer_ctx_xa;
+ struct mlx5hws_context_vports vports;
+};
+
+static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx)
+{
+ return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+}
+
+bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx);
+
+u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx);
+
+#endif /* MLX5HWS_CONTEXT_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c
new file mode 100644
index 000000000000..2b8c5a4e1c4c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/version.h>
+#include "mlx5hws_internal.h"
+
+static int
+hws_debug_dump_matcher_template_definer(struct seq_file *f,
+ void *parent_obj,
+ struct mlx5hws_definer *definer,
+ enum mlx5hws_debug_res_type type)
+{
+ int i;
+
+ if (!definer)
+ return 0;
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,",
+ type,
+ HWS_PTR_TO_ID(definer),
+ HWS_PTR_TO_ID(parent_obj),
+ definer->obj_id,
+ definer->type);
+
+ for (i = 0; i < DW_SELECTORS; i++)
+ seq_printf(f, "0x%x%s", definer->dw_selector[i],
+ (i == DW_SELECTORS - 1) ? "," : "-");
+
+ for (i = 0; i < BYTE_SELECTORS; i++)
+ seq_printf(f, "0x%x%s", definer->byte_selector[i],
+ (i == BYTE_SELECTORS - 1) ? "," : "-");
+
+ for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++)
+ seq_printf(f, "%02x", definer->mask.jumbo[i]);
+
+ seq_puts(f, "\n");
+
+ return 0;
+}
+
+static int
+hws_debug_dump_matcher_match_template(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ enum mlx5hws_debug_res_type type;
+ int i, ret;
+
+ for (i = 0; i < matcher->num_of_mt; i++) {
+ struct mlx5hws_match_template *mt = &matcher->mt[i];
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE,
+ HWS_PTR_TO_ID(mt),
+ HWS_PTR_TO_ID(matcher),
+ mt->fc_sz,
+ 0, 0);
+
+ type = MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER;
+ ret = hws_debug_dump_matcher_template_definer(f, mt, mt->definer, type);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump_matcher_action_template(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ enum mlx5hws_action_type action_type;
+ int i, j;
+
+ for (i = 0; i < matcher->num_of_at; i++) {
+ struct mlx5hws_action_template *at = &matcher->at[i];
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE,
+ HWS_PTR_TO_ID(at),
+ HWS_PTR_TO_ID(matcher),
+ at->only_term,
+ at->num_of_action_stes,
+ at->num_actions);
+
+ for (j = 0; j < at->num_actions; j++) {
+ action_type = at->action_type_arr[j];
+ seq_printf(f, ",%s", mlx5hws_action_type_to_str(action_type));
+ }
+
+ seq_puts(f, "\n");
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump_matcher_attr(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR,
+ HWS_PTR_TO_ID(matcher),
+ attr->priority,
+ attr->mode,
+ attr->table.sz_row_log,
+ attr->table.sz_col_log,
+ attr->optimize_using_rule_idx,
+ attr->optimize_flow_src,
+ attr->insert_mode,
+ attr->distribute_mode);
+
+ return 0;
+}
+
+static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ enum mlx5hws_table_type tbl_type = matcher->tbl->type;
+ struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
+ struct mlx5hws_pool_chunk *ste;
+ struct mlx5hws_pool *ste_pool;
+ u64 icm_addr_0 = 0;
+ u64 icm_addr_1 = 0;
+ u32 ste_0_id = -1;
+ u32 ste_1_id = -1;
+ int ret;
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,0x%llx",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER,
+ HWS_PTR_TO_ID(matcher),
+ HWS_PTR_TO_ID(matcher->tbl),
+ matcher->num_of_mt,
+ matcher->end_ft_id,
+ matcher->col_matcher ? HWS_PTR_TO_ID(matcher->col_matcher) : 0);
+
+ ste = &matcher->match_ste.ste;
+ ste_pool = matcher->match_ste.pool;
+ if (ste_pool) {
+ ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+ ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ }
+
+ seq_printf(f, ",%d,%d,%d,%d",
+ matcher->match_ste.rtc_0_id,
+ (int)ste_0_id,
+ matcher->match_ste.rtc_1_id,
+ (int)ste_1_id);
+
+ ste = &matcher->action_ste[0].ste;
+ ste_pool = matcher->action_ste[0].pool;
+ if (ste_pool) {
+ ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+ ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ else
+ ste_1_id = -1;
+ } else {
+ ste_0_id = -1;
+ ste_1_id = -1;
+ }
+
+ ft_attr.type = matcher->tbl->fw_ft_type;
+ ret = mlx5hws_cmd_flow_table_query(matcher->tbl->ctx->mdev,
+ matcher->end_ft_id,
+ &ft_attr,
+ &icm_addr_0,
+ &icm_addr_1);
+ if (ret)
+ return ret;
+
+ seq_printf(f, ",%d,%d,%d,%d,%d,0x%llx,0x%llx\n",
+ matcher->action_ste[0].rtc_0_id,
+ (int)ste_0_id,
+ matcher->action_ste[0].rtc_1_id,
+ (int)ste_1_id,
+ 0,
+ mlx5hws_debug_icm_to_idx(icm_addr_0),
+ mlx5hws_debug_icm_to_idx(icm_addr_1));
+
+ ret = hws_debug_dump_matcher_attr(f, matcher);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_matcher_match_template(f, matcher);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_matcher_action_template(f, matcher);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hws_debug_dump_table(struct seq_file *f, struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
+ struct mlx5hws_matcher *matcher;
+ u64 local_icm_addr_0 = 0;
+ u64 local_icm_addr_1 = 0;
+ u64 icm_addr_0 = 0;
+ u64 icm_addr_1 = 0;
+ int ret;
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d,%d,%d",
+ MLX5HWS_DEBUG_RES_TYPE_TABLE,
+ HWS_PTR_TO_ID(tbl),
+ HWS_PTR_TO_ID(tbl->ctx),
+ tbl->ft_id,
+ MLX5HWS_TABLE_TYPE_BASE + tbl->type,
+ tbl->fw_ft_type,
+ tbl->level,
+ 0);
+
+ ft_attr.type = tbl->fw_ft_type;
+ ret = mlx5hws_cmd_flow_table_query(tbl->ctx->mdev,
+ tbl->ft_id,
+ &ft_attr,
+ &icm_addr_0,
+ &icm_addr_1);
+ if (ret)
+ return ret;
+
+ seq_printf(f, ",0x%llx,0x%llx,0x%llx,0x%llx,0x%llx\n",
+ mlx5hws_debug_icm_to_idx(icm_addr_0),
+ mlx5hws_debug_icm_to_idx(icm_addr_1),
+ mlx5hws_debug_icm_to_idx(local_icm_addr_0),
+ mlx5hws_debug_icm_to_idx(local_icm_addr_1),
+ HWS_PTR_TO_ID(tbl->default_miss.miss_tbl));
+
+ list_for_each_entry(matcher, &tbl->matchers_list, list_node) {
+ ret = hws_debug_dump_matcher(f, matcher);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump_context_send_engine(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_send_engine *send_queue;
+ struct mlx5hws_send_ring *send_ring;
+ struct mlx5hws_send_ring_cq *cq;
+ struct mlx5hws_send_ring_sq *sq;
+ int i;
+
+ for (i = 0; i < (int)ctx->queues; i++) {
+ send_queue = &ctx->send_queue[i];
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE,
+ HWS_PTR_TO_ID(ctx),
+ i,
+ send_queue->used_entries,
+ send_queue->num_entries,
+ 1, /* one send ring per queue */
+ send_queue->num_entries,
+ send_queue->err,
+ send_queue->completed.ci,
+ send_queue->completed.pi,
+ send_queue->completed.mask);
+
+ send_ring = &send_queue->send_ring;
+ cq = &send_ring->send_cq;
+ sq = &send_ring->send_sq;
+
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING,
+ HWS_PTR_TO_ID(ctx),
+ 0, /* one send ring per send queue */
+ i,
+ cq->mcq.cqn,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ cq->mcq.cqe_sz,
+ sq->sqn,
+ 0,
+ 0,
+ 0);
+ }
+
+ return 0;
+}
+
+static int hws_debug_dump_context_caps(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ seq_printf(f, "%d,0x%llx,%s,%d,%d,%d,%d,",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS,
+ HWS_PTR_TO_ID(ctx),
+ caps->fw_ver,
+ caps->wqe_based_update,
+ caps->ste_format,
+ caps->ste_alloc_log_max,
+ caps->log_header_modify_argument_max_alloc);
+
+ seq_printf(f, "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%s\n",
+ caps->flex_protocols,
+ caps->rtc_reparse_mode,
+ caps->rtc_index_mode,
+ caps->ste_alloc_log_gran,
+ caps->stc_alloc_log_max,
+ caps->stc_alloc_log_gran,
+ caps->rtc_log_depth_max,
+ caps->format_select_gtpu_dw_0,
+ caps->format_select_gtpu_dw_1,
+ caps->format_select_gtpu_dw_2,
+ caps->format_select_gtpu_ext_dw_0,
+ caps->nic_ft.max_level,
+ caps->nic_ft.reparse,
+ caps->fdb_ft.max_level,
+ caps->fdb_ft.reparse,
+ caps->log_header_modify_argument_granularity,
+ caps->linear_match_definer,
+ "regc_3");
+
+ return 0;
+}
+
+static int hws_debug_dump_context_attr(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ seq_printf(f, "%u,0x%llx,%d,%zu,%d,%s,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR,
+ HWS_PTR_TO_ID(ctx),
+ ctx->pd_num,
+ ctx->queues,
+ ctx->send_queue->num_entries,
+ "None", /* no shared gvmi */
+ ctx->caps->vhca_id,
+ 0xffff); /* no shared gvmi */
+
+ return 0;
+}
+
+static int hws_debug_dump_context_info(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5_core_dev *dev = ctx->mdev;
+ int ret;
+
+ seq_printf(f, "%d,0x%llx,%d,%s,%s.KERNEL_%u_%u_%u\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT,
+ HWS_PTR_TO_ID(ctx),
+ ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT,
+ pci_name(dev->pdev),
+ HWS_DEBUG_FORMAT_VERSION,
+ LINUX_VERSION_MAJOR,
+ LINUX_VERSION_PATCHLEVEL,
+ LINUX_VERSION_SUBLEVEL);
+
+ ret = hws_debug_dump_context_attr(f, ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_context_caps(f, ctx);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hws_debug_dump_context_stc_resource(struct seq_file *f,
+ struct mlx5hws_context *ctx,
+ u32 tbl_type,
+ struct mlx5hws_pool_resource *resource)
+{
+ seq_printf(f, "%d,0x%llx,%u,%u\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC,
+ HWS_PTR_TO_ID(ctx),
+ tbl_type,
+ resource->base_id);
+
+ return 0;
+}
+
+static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_pool *stc_pool;
+ u32 table_type;
+ int ret;
+ int i;
+
+ for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
+ stc_pool = ctx->stc_pool[i];
+ table_type = MLX5HWS_TABLE_TYPE_BASE + i;
+
+ if (!stc_pool)
+ continue;
+
+ if (stc_pool->resource[0]) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
+ stc_pool->resource[0]);
+ if (ret)
+ return ret;
+ }
+
+ if (i == MLX5HWS_TABLE_TYPE_FDB && stc_pool->mirror_resource[0]) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
+ stc_pool->mirror_resource[0]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_table *tbl;
+ int ret;
+
+ ret = hws_debug_dump_context_info(f, ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_context_send_engine(f, ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_context_stc(f, ctx);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(tbl, &ctx->tbl_list, tbl_list_node) {
+ ret = hws_debug_dump_table(f, tbl);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ int ret;
+
+ if (!f || !ctx)
+ return -EINVAL;
+
+ mutex_lock(&ctx->ctrl_lock);
+ ret = hws_debug_dump_context(f, ctx);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return ret;
+}
+
+static int hws_dump_show(struct seq_file *file, void *priv)
+{
+ return hws_debug_dump(file, file->private);
+}
+DEFINE_SHOW_ATTRIBUTE(hws_dump);
+
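+/* Create the dump file at <device debugfs root>/steering/fdb/ctx_<address>,
+ * with the root taken from mlx5_debugfs_get_dev_root(); reading the file
+ * yields the comma-separated records written by hws_debug_dump().
+ */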
+void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx)
+{
+ struct mlx5_core_dev *dev = ctx->mdev;
+ char file_name[128];
+
+ ctx->debug_info.steering_debugfs =
+ debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev));
+ ctx->debug_info.fdb_debugfs =
+ debugfs_create_dir("fdb", ctx->debug_info.steering_debugfs);
+
+ sprintf(file_name, "ctx_%p", ctx);
+ debugfs_create_file(file_name, 0444, ctx->debug_info.fdb_debugfs,
+ ctx, &hws_dump_fops);
+}
+
+void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx)
+{
+ debugfs_remove_recursive(ctx->debug_info.steering_debugfs);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h
new file mode 100644
index 000000000000..b93a536035d9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_DEBUG_H_
+#define MLX5HWS_DEBUG_H_
+
+#define HWS_DEBUG_FORMAT_VERSION "1.0"
+
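+/* Dump object IDs are the low 32 bits of the object's kernel address, used
+ * to correlate records within a single dump.
+ */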
+#define HWS_PTR_TO_ID(p) ((u64)(uintptr_t)(p) & 0xFFFFFFFFULL)
+
+enum mlx5hws_debug_res_type {
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT = 4000,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR = 4001,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS = 4002,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE = 4003,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING = 4004,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC = 4005,
+
+ MLX5HWS_DEBUG_RES_TYPE_TABLE = 4100,
+
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER = 4200,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR = 4201,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE = 4202,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER = 4203,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE = 4204,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_HASH_DEFINER = 4205,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_RANGE_DEFINER = 4206,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_COMPARE_MATCH_DEFINER = 4207,
+};
+
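+/* Convert an ICM address to a dump index: drop the low 6 bits (64-byte ICM
+ * granularity) and keep the low 32 bits of the result.
+ */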
+static inline u64
+mlx5hws_debug_icm_to_idx(u64 icm_addr)
+{
+ return (icm_addr >> 6) & 0xffffffff;
+}
+
+void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx);
+void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx);
+
+#endif /* MLX5HWS_DEBUG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
new file mode 100644
index 000000000000..3f4c58bada37
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
@@ -0,0 +1,2146 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+/* Pattern tunnel layer bits. */
+#define MLX5_FLOW_LAYER_VXLAN BIT(12)
+#define MLX5_FLOW_LAYER_VXLAN_GPE BIT(13)
+#define MLX5_FLOW_LAYER_GRE BIT(14)
+#define MLX5_FLOW_LAYER_MPLS BIT(15)
+
+/* Pattern tunnel layer bits (continued). */
+#define MLX5_FLOW_LAYER_IPIP BIT(23)
+#define MLX5_FLOW_LAYER_IPV6_ENCAP BIT(24)
+#define MLX5_FLOW_LAYER_NVGRE BIT(25)
+#define MLX5_FLOW_LAYER_GENEVE BIT(26)
+
+#define MLX5_FLOW_ITEM_FLEX_TUNNEL BIT_ULL(39)
+
+/* Tunnel Masks. */
+#define MLX5_FLOW_LAYER_TUNNEL \
+ (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
+ MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
+ MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
+ MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
+ MLX5_FLOW_ITEM_FLEX_TUNNEL)
+
+#define GTP_PDU_SC 0x85
+#define BAD_PORT 0xBAD
+#define ETH_TYPE_IPV4_VXLAN 0x0800
+#define ETH_TYPE_IPV6_VXLAN 0x86DD
+#define UDP_GTPU_PORT 2152
+#define UDP_PORT_MPLS 6635
+#define UDP_GENEVE_PORT 6081
+#define UDP_ROCEV2_PORT 4791
+#define HWS_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)
+
+#define STE_NO_VLAN 0x0
+#define STE_SVLAN 0x1
+#define STE_CVLAN 0x2
+#define STE_NO_L3 0x0
+#define STE_IPV4 0x1
+#define STE_IPV6 0x2
+#define STE_NO_L4 0x0
+#define STE_TCP 0x1
+#define STE_UDP 0x2
+#define STE_ICMP 0x3
+#define STE_ESP 0x3
+
+#define IPV4 0x4
+#define IPV6 0x6
+
+/* Setter function based on bit offset and mask, for an aligned 32-bit DW */
+#define _HWS_SET32(p, v, byte_off, bit_off, mask) \
+ do { \
+ u32 _v = v; \
+ *((__be32 *)(p) + ((byte_off) / 4)) = \
+ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + \
+ ((byte_off) / 4))) & \
+ (~((mask) << (bit_off)))) | \
+ (((_v) & (mask)) << \
+ (bit_off))); \
+ } while (0)
+
+/* Setter function based on bit offset and mask, for an unaligned 32-bit DW */
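+/* When bit_off is negative the field straddles a DW boundary: the value's
+ * high bits are written at offset 0 of the current DW and its remaining low
+ * bits into the DW that follows.
+ */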
+#define HWS_SET32(p, v, byte_off, bit_off, mask) \
+ do { \
+ if (unlikely((bit_off) < 0)) { \
+ u32 _bit_off = -1 * (bit_off); \
+ u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
+ _HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
+ _HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
+ (bit_off) % BITS_IN_DW, second_dw_mask); \
+ } else { \
+ _HWS_SET32(p, v, byte_off, (bit_off), (mask)); \
+ } \
+ } while (0)
+
+/* Getter for a field within an aligned 32-bit DW */
+#define HWS_GET32(p, byte_off, bit_off, mask) \
+ ((be32_to_cpu(*((__be32 *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask))
+
+#define HWS_CALC_FNAME(field, inner) \
+ ((inner) ? MLX5HWS_DEFINER_FNAME_##field##_I : \
+ MLX5HWS_DEFINER_FNAME_##field##_O)
+
+#define HWS_GET_MATCH_PARAM(match_param, hdr) \
+ MLX5_GET(fte_match_param, match_param, hdr)
+
+#define HWS_IS_FLD_SET(match_param, hdr) \
+ (!!(HWS_GET_MATCH_PARAM(match_param, hdr)))
+
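+/* Check whether any DW of a match-param field wider than 32 bits is set;
+ * the field size must be a whole number of DWs (enforced by BUILD_BUG_ON).
+ */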
+#define HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) ({ \
+ BUILD_BUG_ON((sz_in_bits) % 32); \
+ u32 sz = sz_in_bits; \
+ u32 res = 0; \
+ u32 dw_off = __mlx5_dw_off(fte_match_param, hdr); \
+ while (!res && sz >= 32) { \
+ res = *((match_param) + (dw_off++)); \
+ sz -= 32; \
+ } \
+ res; \
+ })
+
+#define HWS_IS_FLD_SET_SZ(match_param, hdr, sz_in_bits) \
+ (((sz_in_bits) > 32) ? HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) : \
+ !!(HWS_GET_MATCH_PARAM(match_param, hdr)))
+
+#define HWS_GET64_MATCH_PARAM(match_param, hdr) \
+ MLX5_GET64(fte_match_param, match_param, hdr)
+
+#define HWS_IS_FLD64_SET(match_param, hdr) \
+ (!!(HWS_GET64_MATCH_PARAM(match_param, hdr)))
+
+#define HWS_CALC_HDR_SRC(fc, s_hdr) \
+ do { \
+ (fc)->s_bit_mask = __mlx5_mask(fte_match_param, s_hdr); \
+ (fc)->s_bit_off = __mlx5_dw_bit_off(fte_match_param, s_hdr); \
+ (fc)->s_byte_off = MLX5_BYTE_OFF(fte_match_param, s_hdr); \
+ } while (0)
+
+#define HWS_CALC_HDR_DST(fc, d_hdr) \
+ do { \
+ (fc)->bit_mask = __mlx5_mask(definer_hl, d_hdr); \
+ (fc)->bit_off = __mlx5_dw_bit_off(definer_hl, d_hdr); \
+ (fc)->byte_off = MLX5_BYTE_OFF(definer_hl, d_hdr); \
+ } while (0)
+
+#define HWS_CALC_HDR(fc, s_hdr, d_hdr) \
+ do { \
+ HWS_CALC_HDR_SRC(fc, s_hdr); \
+ HWS_CALC_HDR_DST(fc, d_hdr); \
+ (fc)->tag_set = &hws_definer_generic_set; \
+ } while (0)
+
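+/* Wire a match-param field to a definer header-layout field: if the field
+ * is present in the match mask, cache its source and destination offsets in
+ * the field-copy entry and use the generic setter.
+ */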
+#define HWS_SET_HDR(fc_arr, match_param, fname, s_hdr, d_hdr) \
+ do { \
+ if (HWS_IS_FLD_SET(match_param, s_hdr)) \
+ HWS_CALC_HDR(&(fc_arr)[MLX5HWS_DEFINER_FNAME_##fname], s_hdr, d_hdr); \
+ } while (0)
+
+struct mlx5hws_definer_sel_ctrl {
+ u8 allowed_full_dw; /* Full DW selectors cover all offsets */
+ u8 allowed_lim_dw; /* Limited DW selectors cover offset < 64 */
+ u8 allowed_bytes; /* Bytes selectors, up to offset 255 */
+ u8 used_full_dw;
+ u8 used_lim_dw;
+ u8 used_bytes;
+ u8 full_dw_selector[DW_SELECTORS];
+ u8 lim_dw_selector[DW_SELECTORS_LIMITED];
+ u8 byte_selector[BYTE_SELECTORS];
+};
+
+struct mlx5hws_definer_conv_data {
+ struct mlx5hws_context *ctx;
+ struct mlx5hws_definer_fc *fc;
+ /* enum mlx5hws_definer_match_flag */
+ u32 match_flags;
+};
+
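+/* tag_mask_set helper that forces an all-ones mask; used for derived fields
+ * such as the VLAN qualifier and L3 type, which are computed rather than
+ * copied from the match param.
+ */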
+static void
+hws_definer_ones_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ HWS_SET32(tag, -1, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_generic_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ /* Generic copy from match param to tag; can be optimized per field */
+ u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);
+
+ HWS_SET32(tag, val, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_outer_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag))
+ HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag))
+ HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_inner_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag))
+ HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag))
+ HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag,
+ bool inner)
+{
+ u32 second_cvlan_tag = inner ?
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) :
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag);
+ u32 second_svlan_tag = inner ?
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag) :
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag);
+
+ if (second_cvlan_tag)
+ HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (second_svlan_tag)
+ HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_inner_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ hws_definer_second_vlan_type_set(fc, match_param, tag, true);
+}
+
+static void
+hws_definer_outer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ hws_definer_second_vlan_type_set(fc, match_param, tag, false);
+}
+
+static void hws_definer_icmp_dw1_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_code);
+ u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_type);
+ u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
+ (code << __mlx5_dw_bit_off(header_icmp, code));
+
+ HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_icmpv6_dw1_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_code);
+ u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_type);
+ u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
+ (code << __mlx5_dw_bit_off(header_icmp, code));
+
+ HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_l3_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);
+
+ if (val == IPV4)
+ HWS_SET32(tag, STE_IPV4, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (val == IPV6)
+ HWS_SET32(tag, STE_IPV6, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_L3, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_set_source_port_gvmi(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag,
+ struct mlx5hws_context *peer_ctx)
+{
+ u16 source_port = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port);
+ u16 vport_gvmi = 0;
+ int ret;
+
+ ret = mlx5hws_vport_get_gvmi(peer_ctx, source_port, &vport_gvmi);
+ if (ret) {
+ HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
+ mlx5hws_err(fc->ctx, "Vport 0x%x is disabled or invalid\n", source_port);
+ return;
+ }
+
+ if (vport_gvmi)
+ HWS_SET32(tag, vport_gvmi, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_set_source_gvmi_vhca_id(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+__must_hold(&fc->ctx->ctrl_lock)
+{
+ int id = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_eswitch_owner_vhca_id);
+ struct mlx5hws_context *peer_ctx;
+
+ if (id == fc->ctx->caps->vhca_id)
+ peer_ctx = fc->ctx;
+ else
+ peer_ctx = xa_load(&fc->ctx->peer_ctx_xa, id);
+
+ if (!peer_ctx) {
+ HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
+ mlx5hws_err(fc->ctx, "Invalid vhca_id provided 0x%x\n", id);
+ return;
+ }
+
+ hws_definer_set_source_port_gvmi(fc, match_param, tag, peer_ctx);
+}
+
+static void
+hws_definer_set_source_gvmi(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ hws_definer_set_source_port_gvmi(fc, match_param, tag, fc->ctx);
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_flex_parser_steering_ok_bits_handler(struct mlx5hws_definer_conv_data *cd,
+ u8 parser_id)
+{
+ struct mlx5hws_definer_fc *fc;
+
+ switch (parser_id) {
+ case 0:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser0_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 1:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser1_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 2:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser2_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 3:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser3_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 4:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser4_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 5:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser5_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 6:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser6_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 7:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser7_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ default:
+ mlx5hws_err(cd->ctx, "Unsupported flex parser steering ok index %u\n", parser_id);
+ return NULL;
+ }
+
+ return fc;
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_flex_parser_handler(struct mlx5hws_definer_conv_data *cd,
+ u8 parser_id)
+{
+ struct mlx5hws_definer_fc *fc;
+
+ switch (parser_id) {
+ case 0:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_0);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 1:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_1);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 2:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_2);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 3:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_3);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 4:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_4);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 5:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_5);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 6:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_6);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 7:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_7);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ default:
+ mlx5hws_err(cd->ctx, "Unsupported flex parser %u\n", parser_id);
+ return NULL;
+ }
+
+ return fc;
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_misc4_fields_handler(struct mlx5hws_definer_conv_data *cd,
+ bool *parser_is_used,
+ u32 id,
+ u32 value)
+{
+ if (id || value) {
+ if (id >= HWS_NUM_OF_FLEX_PARSERS) {
+ mlx5hws_err(cd->ctx, "Unsupported parser id\n");
+ return NULL;
+ }
+
+ if (parser_is_used[id]) {
+ mlx5hws_err(cd->ctx, "Parser id have been used\n");
+ return NULL;
+ }
+ }
+
+ parser_is_used[id] = true;
+
+ return hws_definer_flex_parser_handler(cd, id);
+}
+
+static int
+hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd)
+{
+ u32 flags;
+
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1);
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2);
+
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP);
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 |
+ MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 |
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_O |
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_I);
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ return 0;
+
+err_conflict:
+ mlx5hws_err(cd->ctx, "Invalid definer fields combination\n");
+ return -EINVAL;
+}
+
+static int
+hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+ u32 *s_ipv6, *d_ipv6;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type, 0x2) ||
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c2, 0xe) ||
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c4, 0x4)) {
+ mlx5hws_err(cd->ctx, "Unsupported outer parameters set\n");
+ return -EINVAL;
+ }
+
+ /* L2 Check ethertype */
+ HWS_SET_HDR(fc, match_param, ETH_TYPE_O,
+ outer_headers.ethertype,
+ eth_l2_outer.l3_ethertype);
+ /* L2 Check SMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_O,
+ outer_headers.smac_47_16, eth_l2_src_outer.smac_47_16);
+ /* L2 Check SMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_O,
+ outer_headers.smac_15_0, eth_l2_src_outer.smac_15_0);
+ /* L2 Check DMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_O,
+ outer_headers.dmac_47_16, eth_l2_outer.dmac_47_16);
+ /* L2 Check DMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_O,
+ outer_headers.dmac_15_0, eth_l2_outer.dmac_15_0);
+
+ /* L2 VLAN */
+ HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_O,
+ outer_headers.first_prio, eth_l2_outer.first_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_CFI_O,
+ outer_headers.first_cfi, eth_l2_outer.first_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_ID_O,
+ outer_headers.first_vid, eth_l2_outer.first_vlan_id);
+
+ /* L2 CVLAN and SVLAN */
+ if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.first_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_outer_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+
+ /* L3 Check IP header */
+ HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O,
+ outer_headers.ip_protocol,
+ eth_l3_outer.protocol_next_header);
+ HWS_SET_HDR(fc, match_param, IP_TTL_O,
+ outer_headers.ttl_hoplimit,
+ eth_l3_outer.time_to_live_hop_limit);
+
+ /* L3 Check IPv4/IPv6 addresses */
+ s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout);
+ d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout);
+
+ /* Assume IPv6 if any of the upper 96 address bits are set */
+ is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
+ is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+
+ if (is_s_ipv6) {
+ /* Handle IPv6 source address */
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_src_outer.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_src_outer.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_src_outer.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_src_outer.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_outer.source_address);
+ }
+ if (is_d_ipv6) {
+ /* Handle IPv6 destination address */
+ HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_dst_outer.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_dst_outer.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_dst_outer.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_dst_outer.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 destination address */
+ HWS_SET_HDR(fc, match_param, IPV4_DST_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_outer.destination_address);
+ }
+
+ /* L4 Handle TCP/UDP */
+ HWS_SET_HDR(fc, match_param, L4_SPORT_O,
+ outer_headers.tcp_sport, eth_l4_outer.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_O,
+ outer_headers.tcp_dport, eth_l4_outer.destination_port);
+ HWS_SET_HDR(fc, match_param, L4_SPORT_O,
+ outer_headers.udp_sport, eth_l4_outer.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_O,
+ outer_headers.udp_dport, eth_l4_outer.destination_port);
+ HWS_SET_HDR(fc, match_param, TCP_FLAGS_O,
+ outer_headers.tcp_flags, eth_l4_outer.tcp_flags);
+
+ /* L3 Handle DSCP, ECN and IHL */
+ HWS_SET_HDR(fc, match_param, IP_DSCP_O,
+ outer_headers.ip_dscp, eth_l3_outer.dscp);
+ HWS_SET_HDR(fc, match_param, IP_ECN_O,
+ outer_headers.ip_ecn, eth_l3_outer.ecn);
+ HWS_SET_HDR(fc, match_param, IPV4_IHL_O,
+ outer_headers.ipv4_ihl, eth_l3_outer.ihl);
+
+ /* Set IP fragmented bit */
+ if (HWS_IS_FLD_SET(match_param, outer_headers.frag)) {
+ smac_set = HWS_IS_FLD_SET(match_param, outer_headers.smac_15_0) ||
+ HWS_IS_FLD_SET(match_param, outer_headers.smac_47_16);
+ dmac_set = HWS_IS_FLD_SET(match_param, outer_headers.dmac_15_0) ||
+ HWS_IS_FLD_SET(match_param, outer_headers.dmac_47_16);
+ if (smac_set == dmac_set) {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_O,
+ outer_headers.frag, eth_l4_outer.ip_fragmented);
+ } else {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_O,
+ outer_headers.frag, eth_l2_src_outer.ip_fragmented);
+ }
+ }
+
+ /* L3_type set */
+ if (HWS_IS_FLD_SET(match_param, outer_headers.ip_version)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.l3_type);
+ curr_fc->tag_set = &hws_definer_l3_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ HWS_CALC_HDR_SRC(curr_fc, outer_headers.ip_version);
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+ u32 *s_ipv6, *d_ipv6;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type, 0x2) ||
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c2, 0xe) ||
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c4, 0x4)) {
+ mlx5hws_err(cd->ctx, "Unsupported inner parameters set\n");
+ return -EINVAL;
+ }
+
+ /* L2 Check ethertype */
+ HWS_SET_HDR(fc, match_param, ETH_TYPE_I,
+ inner_headers.ethertype,
+ eth_l2_inner.l3_ethertype);
+ /* L2 Check SMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_I,
+ inner_headers.smac_47_16, eth_l2_src_inner.smac_47_16);
+ /* L2 Check SMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_I,
+ inner_headers.smac_15_0, eth_l2_src_inner.smac_15_0);
+ /* L2 Check DMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_I,
+ inner_headers.dmac_47_16, eth_l2_inner.dmac_47_16);
+ /* L2 Check DMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_I,
+ inner_headers.dmac_15_0, eth_l2_inner.dmac_15_0);
+
+ /* L2 VLAN */
+ HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_I,
+ inner_headers.first_prio, eth_l2_inner.first_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_CFI_I,
+ inner_headers.first_cfi, eth_l2_inner.first_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_ID_I,
+ inner_headers.first_vid, eth_l2_inner.first_vlan_id);
+
+ /* L2 CVLAN and SVLAN */
+ if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.first_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_inner_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+ /* L3 Check IP header */
+ HWS_SET_HDR(fc, match_param, IP_PROTOCOL_I,
+ inner_headers.ip_protocol,
+ eth_l3_inner.protocol_next_header);
+ HWS_SET_HDR(fc, match_param, IP_VERSION_I,
+ inner_headers.ip_version,
+ eth_l3_inner.ip_version);
+ HWS_SET_HDR(fc, match_param, IP_TTL_I,
+ inner_headers.ttl_hoplimit,
+ eth_l3_inner.time_to_live_hop_limit);
+
+ /* L3 Check IPv4/IPv6 addresses */
+ s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ inner_headers.src_ipv4_src_ipv6.ipv6_layout);
+ d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_layout);
+
+ /* Assume IPv6 if any of the upper 96 address bits are set */
+ is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
+ is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+
+ if (is_s_ipv6) {
+ /* Handle IPv6 source address */
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_src_inner.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_src_inner.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_src_inner.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_src_inner.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_inner.source_address);
+ }
+ if (is_d_ipv6) {
+ /* Handle IPv6 destination address */
+ HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_dst_inner.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_dst_inner.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_dst_inner.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_dst_inner.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 destination address */
+ HWS_SET_HDR(fc, match_param, IPV4_DST_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_inner.destination_address);
+ }
+
+ /* L4 Handle TCP/UDP */
+ HWS_SET_HDR(fc, match_param, L4_SPORT_I,
+ inner_headers.tcp_sport, eth_l4_inner.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_I,
+ inner_headers.tcp_dport, eth_l4_inner.destination_port);
+ HWS_SET_HDR(fc, match_param, L4_SPORT_I,
+ inner_headers.udp_sport, eth_l4_inner.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_I,
+ inner_headers.udp_dport, eth_l4_inner.destination_port);
+ HWS_SET_HDR(fc, match_param, TCP_FLAGS_I,
+ inner_headers.tcp_flags, eth_l4_inner.tcp_flags);
+
+ /* L3 Handle DSCP, ECN and IHL */
+ HWS_SET_HDR(fc, match_param, IP_DSCP_I,
+ inner_headers.ip_dscp, eth_l3_inner.dscp);
+ HWS_SET_HDR(fc, match_param, IP_ECN_I,
+ inner_headers.ip_ecn, eth_l3_inner.ecn);
+ HWS_SET_HDR(fc, match_param, IPV4_IHL_I,
+ inner_headers.ipv4_ihl, eth_l3_inner.ihl);
+
+ /* Set IP fragmented bit */
+ if (HWS_IS_FLD_SET(match_param, inner_headers.frag)) {
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.vxlan_vni)) {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+ inner_headers.frag, eth_l2_inner.ip_fragmented);
+ } else {
+ smac_set = HWS_IS_FLD_SET(match_param, inner_headers.smac_15_0) ||
+ HWS_IS_FLD_SET(match_param, inner_headers.smac_47_16);
+ dmac_set = HWS_IS_FLD_SET(match_param, inner_headers.dmac_15_0) ||
+ HWS_IS_FLD_SET(match_param, inner_headers.dmac_47_16);
+ if (smac_set == dmac_set) {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+ inner_headers.frag, eth_l4_inner.ip_fragmented);
+ } else {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+ inner_headers.frag, eth_l2_src_inner.ip_fragmented);
+ }
+ }
+ }
+
+ /* L3_type set */
+ if (HWS_IS_FLD_SET(match_param, inner_headers.ip_version)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.l3_type);
+ curr_fc->tag_set = &hws_definer_l3_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ HWS_CALC_HDR_SRC(curr_fc, inner_headers.ip_version);
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1, 0x1) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_64, 0xc) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_d8, 0x6) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_e0, 0xc) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_100, 0xc) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_120, 0xa) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_140, 0x8) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.bth_dst_qp) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.bth_opcode) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.inner_esp_spi) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.outer_esp_spi) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.source_vhca_port) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1a0, 0x60)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc parameters set\n");
+ return -EINVAL;
+ }
+
+ /* Check GRE related fields */
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_c_present)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_C];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_c_present,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_c_present);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_c_present);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_k_present)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_K];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_k_present,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_k_present);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_k_present);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_s_present)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_S];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_s_present,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_s_present);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_s_present);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_protocol)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_protocol,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_key.key)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY;
+ HWS_SET_HDR(fc, match_param, GRE_OPT_KEY,
+ misc_parameters.gre_key.key, tunnel_header.tunnel_header_2);
+ }
+
+ /* Check GENEVE related fields */
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_vni)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_VNI];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_vni,
+ tunnel_header.tunnel_header_1);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, vni);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, vni);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_opt_len)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_opt_len,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, opt_len);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, opt_len);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_protocol_type)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_PROTO];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_protocol_type,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, protocol_type);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, protocol_type);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_oam)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OAM];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_oam,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, o_flag);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, o_flag);
+ }
+
+ HWS_SET_HDR(fc, match_param, SOURCE_QP,
+ misc_parameters.source_sqn, source_qp_gvmi.source_qp);
+ HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_O,
+ misc_parameters.outer_ipv6_flow_label, eth_l3_outer.flow_label);
+ HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_I,
+ misc_parameters.inner_ipv6_flow_label, eth_l3_inner.flow_label);
+
+ /* L2 Second VLAN */
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_O,
+ misc_parameters.outer_second_prio, eth_l2_outer.second_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_I,
+ misc_parameters.inner_second_prio, eth_l2_inner.second_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_O,
+ misc_parameters.outer_second_cfi, eth_l2_outer.second_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_I,
+ misc_parameters.inner_second_cfi, eth_l2_inner.second_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_O,
+ misc_parameters.outer_second_vid, eth_l2_outer.second_vlan_id);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_I,
+ misc_parameters.inner_second_vid, eth_l2_inner.second_vlan_id);
+
+ /* L2 Second CVLAN and SVLAN */
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.second_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_outer_second_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.second_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_inner_second_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+
+ /* VXLAN VNI */
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.vxlan_vni)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_VNI];
+ HWS_CALC_HDR(curr_fc, misc_parameters.vxlan_vni, tunnel_header.tunnel_header_1);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan, vni);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan, vni);
+ }
+
+ /* Flex protocol steering ok bits */
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.geneve_tlv_option_0_exist)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+
+ if (!caps->flex_parser_ok_bits_supp) {
+ mlx5hws_err(cd->ctx, "Unsupported flex_parser_ok_bits_supp capability\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = hws_definer_flex_parser_steering_ok_bits_handler(
+ cd, caps->flex_parser_id_geneve_tlv_option_0);
+ if (!curr_fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(curr_fc, misc_parameters.geneve_tlv_option_0_exist);
+ }
+
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_SOURCE_GVMI];
+ HWS_CALC_HDR_DST(curr_fc, source_qp_gvmi.source_gvmi);
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ curr_fc->tag_set = HWS_IS_FLD_SET(match_param,
+ misc_parameters.source_eswitch_owner_vhca_id) ?
+ &hws_definer_set_source_gvmi_vhca_id :
+ &hws_definer_set_source_gvmi;
+ } else {
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.source_eswitch_owner_vhca_id)) {
+ mlx5hws_err(cd->ctx,
+ "Unsupported source_eswitch_owner_vhca_id field usage\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
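+
+/* The GRE/GENEVE/VXLAN fields above all land in the shared tunnel_header
+ * DWs; bit_mask and bit_off carve the individual protocol fields out of
+ * tunnel_header_0/1/2 using the header_gre/header_geneve/header_vxlan
+ * layouts.
+ */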
+
+static int
+hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1a0, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1b8, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_2.macsec_syndrome) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_2.ipsec_syndrome)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc2 parameters set\n");
+ return -EINVAL;
+ }
+
+ HWS_SET_HDR(fc, match_param, MPLS0_O,
+ misc_parameters_2.outer_first_mpls, mpls_outer.mpls0_label);
+ HWS_SET_HDR(fc, match_param, MPLS0_I,
+ misc_parameters_2.inner_first_mpls, mpls_inner.mpls0_label);
+ HWS_SET_HDR(fc, match_param, REG_0,
+ misc_parameters_2.metadata_reg_c_0, registers.register_c_0);
+ HWS_SET_HDR(fc, match_param, REG_1,
+ misc_parameters_2.metadata_reg_c_1, registers.register_c_1);
+ HWS_SET_HDR(fc, match_param, REG_2,
+ misc_parameters_2.metadata_reg_c_2, registers.register_c_2);
+ HWS_SET_HDR(fc, match_param, REG_3,
+ misc_parameters_2.metadata_reg_c_3, registers.register_c_3);
+ HWS_SET_HDR(fc, match_param, REG_4,
+ misc_parameters_2.metadata_reg_c_4, registers.register_c_4);
+ HWS_SET_HDR(fc, match_param, REG_5,
+ misc_parameters_2.metadata_reg_c_5, registers.register_c_5);
+ HWS_SET_HDR(fc, match_param, REG_6,
+ misc_parameters_2.metadata_reg_c_6, registers.register_c_6);
+ HWS_SET_HDR(fc, match_param, REG_7,
+ misc_parameters_2.metadata_reg_c_7, registers.register_c_7);
+ HWS_SET_HDR(fc, match_param, REG_A,
+ misc_parameters_2.metadata_reg_a, metadata.general_purpose);
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_gre)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over gre parameters set\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_gre);
+ if (!curr_fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(curr_fc, misc_parameters_2.outer_first_mpls_over_gre);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_udp)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over udp parameters set\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_udp);
+ if (!curr_fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(curr_fc, misc_parameters_2.outer_first_mpls_over_udp);
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc3(struct mlx5hws_definer_conv_data *cd, u32 *match_param)
+{
+ struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+ bool vxlan_gpe_flex_parser_enabled;
+
+ /* Check reserved and unsupported fields */
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_80, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_b0, 0x10) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_170, 0x10) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_1e0, 0x20)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc3 parameters set\n");
+ return -EINVAL;
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_seq_num) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_ack_num)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_I;
+ HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
+ misc_parameters_3.inner_tcp_seq_num, tcp_icmp.tcp_seq);
+ HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
+ misc_parameters_3.inner_tcp_ack_num, tcp_icmp.tcp_ack);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_seq_num) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_ack_num)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_O;
+ HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
+ misc_parameters_3.outer_tcp_seq_num, tcp_icmp.tcp_seq);
+ HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
+ misc_parameters_3.outer_tcp_ack_num, tcp_icmp.tcp_ack);
+ }
+
+ vxlan_gpe_flex_parser_enabled = caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_vni)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+ if (!vxlan_gpe_flex_parser_enabled) {
+ mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI];
+ HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_vni,
+ tunnel_header.tunnel_header_1);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, vni);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, vni);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_next_protocol)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+ if (!vxlan_gpe_flex_parser_enabled) {
+ mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO];
+ HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_next_protocol,
+ tunnel_header.tunnel_header_0);
+ curr_fc->byte_off += MLX5_BYTE_OFF(header_vxlan_gpe, protocol);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, protocol);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, protocol);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_flags)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+ if (!vxlan_gpe_flex_parser_enabled) {
+ mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS];
+ HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_flags,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, flags);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, flags);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_header_data) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported ICMPv4 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ HWS_SET_HDR(fc, match_param, ICMP_DW3,
+ misc_parameters_3.icmp_header_data, tcp_icmp.icmp_dw3);
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
+ HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
+ curr_fc->tag_set = &hws_definer_icmp_dw1_set;
+ }
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_header_data) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported ICMPv6 parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ HWS_SET_HDR(fc, match_param, ICMP_DW3,
+ misc_parameters_3.icmpv6_header_data, tcp_icmp.icmp_dw3);
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
+ HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
+ curr_fc->tag_set = &hws_definer_icmpv6_dw1_set;
+ }
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.geneve_tlv_option_0_data)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+
+ curr_fc =
+ hws_definer_flex_parser_handler(cd,
+ caps->flex_parser_id_geneve_tlv_option_0);
+ if (!curr_fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.geneve_tlv_option_0_data);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_teid)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU TEID flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTP_TEID];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = __mlx5_mask(header_gtp, teid);
+ curr_fc->byte_off = caps->format_select_gtpu_dw_1 * DW_SIZE;
+ HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_teid);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_type)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = __mlx5_mask(header_gtp, msg_type);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_type);
+ curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+ HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_msg_type);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_flags)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = __mlx5_mask(header_gtp, msg_flags);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_flags);
+ curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+ HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_msg_flags);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_2)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU DW2 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW2];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = -1;
+ curr_fc->byte_off = caps->format_select_gtpu_dw_2 * DW_SIZE;
+ HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_dw_2);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_first_ext_dw_0)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU first EXT DW0 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = -1;
+ curr_fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
+ HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_first_ext_dw_0);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_0)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU DW0 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW0];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = -1;
+ curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+ HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_dw_0);
+ }
+
+ return 0;
+}
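+
+/* For GTPU the device reports, via capabilities, which parsed DW each GTPU
+ * field lands in (format_select_gtpu_dw_0/1/2 and format_select_gtpu_ext_dw_0),
+ * so byte_off is computed from the capability rather than from a fixed
+ * header layout.
+ */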
+
+static int
+hws_definer_conv_misc4(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ bool parser_is_used[HWS_NUM_OF_FLEX_PARSERS] = {};
+ struct mlx5hws_definer_fc *fc;
+ u32 id, value;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_4.reserved_at_100, 0x100)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc4 parameters set\n");
+ return -EINVAL;
+ }
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_0);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_0);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_0);
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_1);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_1);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_1);
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_2);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_2);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_2);
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_3);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_3);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_3);
+
+ return 0;
+}
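+
+/* misc4 carries up to four (id, value) pairs sampled by programmable flex
+ * parsers. Each pair is resolved to a flex-parser field copy by
+ * hws_definer_misc4_fields_handler(), with parser_is_used[] tracking which
+ * of the HWS_NUM_OF_FLEX_PARSERS parsers have already been claimed.
+ */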
+
+static int
+hws_definer_conv_misc5(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ struct mlx5hws_definer_fc *fc = cd->fc;
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_0) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_1) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_2) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_3) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_5.reserved_at_100, 0x100)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc5 parameters set\n");
+ return -EINVAL;
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_0)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
+ HWS_SET_HDR(fc, match_param, TNL_HDR_0,
+ misc_parameters_5.tunnel_header_0, tunnel_header.tunnel_header_0);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_1)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
+ HWS_SET_HDR(fc, match_param, TNL_HDR_1,
+ misc_parameters_5.tunnel_header_1, tunnel_header.tunnel_header_1);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_2)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2;
+ HWS_SET_HDR(fc, match_param, TNL_HDR_2,
+ misc_parameters_5.tunnel_header_2, tunnel_header.tunnel_header_2);
+ }
+
+ HWS_SET_HDR(fc, match_param, TNL_HDR_3,
+ misc_parameters_5.tunnel_header_3, tunnel_header.tunnel_header_3);
+
+ return 0;
+}
+
+static int hws_definer_get_fc_size(struct mlx5hws_definer_fc *fc)
+{
+ u32 fc_sz = 0;
+ int i;
+
+ /* For empty matcher, ZERO_SIZE_PTR is returned */
+ if (fc == ZERO_SIZE_PTR)
+ return 0;
+
+ for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++)
+ if (fc[i].tag_set)
+ fc_sz++;
+ return fc_sz;
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_alloc_compressed_fc(struct mlx5hws_definer_fc *fc)
+{
+ struct mlx5hws_definer_fc *compressed_fc = NULL;
+ u32 definer_size = hws_definer_get_fc_size(fc);
+ u32 fc_sz = 0;
+ int i;
+
+ compressed_fc = kcalloc(definer_size, sizeof(*compressed_fc), GFP_KERNEL);
+ if (!compressed_fc)
+ return NULL;
+
+ /* For empty matcher, ZERO_SIZE_PTR is returned */
+ if (!definer_size)
+ return compressed_fc;
+
+ for (i = 0, fc_sz = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
+ if (!fc[i].tag_set)
+ continue;
+
+ fc[i].fname = i;
+ memcpy(&compressed_fc[fc_sz++], &fc[i], sizeof(*compressed_fc));
+ }
+
+ return compressed_fc;
+}
+
+static void
+hws_definer_set_hl(u8 *hl, struct mlx5hws_definer_fc *fc)
+{
+ int i;
+
+ /* nothing to do for empty matcher */
+ if (fc == ZERO_SIZE_PTR)
+ return;
+
+ for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
+ if (!fc[i].tag_set)
+ continue;
+
+ HWS_SET32(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask);
+ }
+}
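+
+/* For every active field copy this marks the corresponding masked bits in
+ * the header layout, so the best-fit selector search below only has to
+ * consider DWs that are actually matched on.
+ */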
+
+static struct mlx5hws_definer_fc *
+hws_definer_alloc_fc(struct mlx5hws_context *ctx,
+ size_t len)
+{
+ struct mlx5hws_definer_fc *fc;
+ int i;
+
+ fc = kcalloc(len, sizeof(*fc), GFP_KERNEL);
+ if (!fc)
+ return NULL;
+
+ for (i = 0; i < len; i++)
+ fc[i].ctx = ctx;
+
+ return fc;
+}
+
+static int
+hws_definer_conv_match_params_to_hl(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ u8 *hl)
+{
+ struct mlx5hws_definer_conv_data cd = {0};
+ struct mlx5hws_definer_fc *fc;
+ int ret;
+
+ fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
+ if (!fc)
+ return -ENOMEM;
+
+ cd.fc = fc;
+ cd.ctx = ctx;
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6) {
+ mlx5hws_err(ctx, "Unsupported match_criteria_enable provided\n");
+ ret = -EOPNOTSUPP;
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
+ ret = hws_definer_conv_outer(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
+ ret = hws_definer_conv_inner(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
+ ret = hws_definer_conv_misc(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
+ ret = hws_definer_conv_misc2(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
+ ret = hws_definer_conv_misc3(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
+ ret = hws_definer_conv_misc4(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
+ ret = hws_definer_conv_misc5(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ /* Check there is no conflicted fields set together */
+ ret = hws_definer_check_match_flags(&cd);
+ if (ret)
+ goto err_free_fc;
+
+ /* Allocate fc array on mt */
+ mt->fc = hws_definer_alloc_compressed_fc(fc);
+ if (!mt->fc) {
+ mlx5hws_err(ctx,
+ "Convert match params: failed to set field copy to match template\n");
+ ret = -ENOMEM;
+ goto err_free_fc;
+ }
+ mt->fc_sz = hws_definer_get_fc_size(fc);
+
+ /* Fill in headers layout */
+ hws_definer_set_hl(hl, fc);
+
+ kfree(fc);
+ return 0;
+
+err_free_fc:
+ kfree(fc);
+ return ret;
+}
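+
+/* Conversion pipeline: each enabled criteria block (outer/inner/misc1-5)
+ * fills fc[] entries indexed by field name, hws_definer_check_match_flags()
+ * rejects conflicting tunnel combinations, and the sparse fc[] array is
+ * compressed down to the entries that actually have a tag_set callback.
+ */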
+
+struct mlx5hws_definer_fc *
+mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ u32 *match_param,
+ int *fc_sz)
+{
+ struct mlx5hws_definer_fc *compressed_fc = NULL;
+ struct mlx5hws_definer_conv_data cd = {0};
+ struct mlx5hws_definer_fc *fc;
+ int ret;
+
+ fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
+ if (!fc)
+ return NULL;
+
+ cd.fc = fc;
+ cd.ctx = ctx;
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
+ ret = hws_definer_conv_outer(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
+ ret = hws_definer_conv_inner(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
+ ret = hws_definer_conv_misc(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
+ ret = hws_definer_conv_misc2(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
+ ret = hws_definer_conv_misc3(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
+ ret = hws_definer_conv_misc4(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
+ ret = hws_definer_conv_misc5(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ /* Allocate the compressed fc array */
+ compressed_fc = hws_definer_alloc_compressed_fc(fc);
+ if (!compressed_fc) {
+ mlx5hws_err(ctx,
+ "Convert to compressed fc: failed to set field copy to match template\n");
+ goto err_free_fc;
+ }
+ *fc_sz = hws_definer_get_fc_size(fc);
+
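+ /* Reached on success as well: the uncompressed fc array is always freed */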
+err_free_fc:
+ kfree(fc);
+ return compressed_fc;
+}
+
+static int
+hws_definer_find_byte_in_tag(struct mlx5hws_definer *definer,
+ u32 hl_byte_off,
+ u32 *tag_byte_off)
+{
+ int i, dw_to_scan;
+ u8 byte_offset;
+
+ /* Avoid accessing unused DW selectors */
+ dw_to_scan = mlx5hws_definer_is_jumbo(definer) ?
+ DW_SELECTORS : DW_SELECTORS_MATCH;
+
+ /* Add offset since each DW covers multiple BYTEs */
+ byte_offset = hl_byte_off % DW_SIZE;
+ for (i = 0; i < dw_to_scan; i++) {
+ if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) {
+ *tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1);
+ return 0;
+ }
+ }
+
+ /* Add offset to skip DWs in definer */
+ byte_offset = DW_SIZE * DW_SELECTORS;
+ /* Iterate in reverse since the code uses bytes from 7 -> 0 */
+ for (i = BYTE_SELECTORS; i-- > 0;) {
+ if (definer->byte_selector[i] == hl_byte_off) {
+ *tag_byte_off = byte_offset + (BYTE_SELECTORS - i - 1);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
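+
+/* Worked example for the mapping above (DW_SIZE == 4, DW_SELECTORS == 9):
+ * if dw_selector[2] covers header-layout DW 5, then hl_byte_off 21
+ * (DW 5, byte 1) maps to tag byte 1 + 4 * (9 - 2 - 1) = 25. Byte selectors
+ * live after the 36 DW-selected tag bytes and are scanned from the highest
+ * index down, matching the high-to-low allocation in
+ * hws_definer_best_hl_fit_recu().
+ */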
+
+static int
+hws_definer_fc_bind(struct mlx5hws_definer *definer,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz)
+{
+ u32 tag_offset = 0;
+ int ret, byte_diff;
+ u32 i;
+
+ for (i = 0; i < fc_sz; i++) {
+ /* Map header layout byte offset to byte offset in tag */
+ ret = hws_definer_find_byte_in_tag(definer, fc->byte_off, &tag_offset);
+ if (ret)
+ return ret;
+
+ /* Move setter based on the location in the definer */
+ byte_diff = fc->byte_off % DW_SIZE - tag_offset % DW_SIZE;
+ fc->bit_off = fc->bit_off + byte_diff * BITS_IN_BYTE;
+
+ /* Update offset in headers layout to offset in tag */
+ fc->byte_off = tag_offset;
+ fc++;
+ }
+
+ return 0;
+}
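+
+/* After binding, each field copy writes straight into the rule tag:
+ * byte_off/bit_off now describe the position chosen by the definer
+ * selectors instead of the generic header-layout position produced by the
+ * conversion routines above.
+ */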
+
+static bool
+hws_definer_best_hl_fit_recu(struct mlx5hws_definer_sel_ctrl *ctrl,
+ u32 cur_dw,
+ u32 *data)
+{
+ u8 bytes_set;
+ int byte_idx;
+ bool ret;
+ int i;
+
+ /* Reached end, nothing left to do */
+ if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
+ return true;
+
+ /* No data set, can skip to next DW */
+ while (!*data) {
+ cur_dw++;
+ data++;
+
+ /* Reached end, nothing left to do */
+ if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
+ return true;
+ }
+
+ /* Used all DW selectors and Byte selectors, no possible solution */
+ if (ctrl->allowed_full_dw == ctrl->used_full_dw &&
+ ctrl->allowed_lim_dw == ctrl->used_lim_dw &&
+ ctrl->allowed_bytes == ctrl->used_bytes)
+ return false;
+
+ /* Try to use limited DW selectors */
+ if (ctrl->allowed_lim_dw > ctrl->used_lim_dw && cur_dw < 64) {
+ ctrl->lim_dw_selector[ctrl->used_lim_dw++] = cur_dw;
+
+ ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+ if (ret)
+ return ret;
+
+ ctrl->lim_dw_selector[--ctrl->used_lim_dw] = 0;
+ }
+
+ /* Try to use DW selectors */
+ if (ctrl->allowed_full_dw > ctrl->used_full_dw) {
+ ctrl->full_dw_selector[ctrl->used_full_dw++] = cur_dw;
+
+ ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+ if (ret)
+ return ret;
+
+ ctrl->full_dw_selector[--ctrl->used_full_dw] = 0;
+ }
+
+ /* No byte selector for offset bigger than 255 */
+ if (cur_dw * DW_SIZE > 255)
+ return false;
+
+ bytes_set = !!(0x000000ff & *data) +
+ !!(0x0000ff00 & *data) +
+ !!(0x00ff0000 & *data) +
+ !!(0xff000000 & *data);
+
+ /* Check if there are enough byte selectors left */
+ if (bytes_set + ctrl->used_bytes > ctrl->allowed_bytes)
+ return false;
+
+ /* Try to use Byte selectors */
+ for (i = 0; i < DW_SIZE; i++)
+ if ((0xff000000 >> (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
+ /* Use byte selectors high to low */
+ byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
+ ctrl->byte_selector[byte_idx] = cur_dw * DW_SIZE + i;
+ ctrl->used_bytes++;
+ }
+
+ ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < DW_SIZE; i++)
+ if ((0xff000000 >> (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
+ ctrl->used_bytes--;
+ byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
+ ctrl->byte_selector[byte_idx] = 0;
+ }
+
+ return false;
+}
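+
+/* The search above is a depth-first backtracking walk over the header
+ * layout: for every DW with mask bits set it tries a limited DW selector,
+ * then a full DW selector, then per-byte selectors, undoing each choice
+ * before trying the next. A rough sketch of the shape (hypothetical helper
+ * names, not part of this driver):
+ *
+ *	bool fit(struct state *s, int dw)
+ *	{
+ *		if (dw == last_dw)
+ *			return true;
+ *		for_each_available_selector_kind(s, kind) {
+ *			take_selector(s, kind, dw);
+ *			if (fit(s, dw + 1))
+ *				return true;
+ *			undo_selector(s, kind, dw);
+ *		}
+ *		return false;
+ *	}
+ */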
+
+static void
+hws_definer_copy_sel_ctrl(struct mlx5hws_definer_sel_ctrl *ctrl,
+ struct mlx5hws_definer *definer)
+{
+ memcpy(definer->byte_selector, ctrl->byte_selector, ctrl->allowed_bytes);
+ memcpy(definer->dw_selector, ctrl->full_dw_selector, ctrl->allowed_full_dw);
+ memcpy(definer->dw_selector + ctrl->allowed_full_dw,
+ ctrl->lim_dw_selector, ctrl->allowed_lim_dw);
+}
+
+static int
+hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer,
+ u8 *hl)
+{
+ struct mlx5hws_definer_sel_ctrl ctrl = {0};
+ bool found;
+
+ /* Try to create a match definer */
+ ctrl.allowed_full_dw = DW_SELECTORS_MATCH;
+ ctrl.allowed_lim_dw = 0;
+ ctrl.allowed_bytes = BYTE_SELECTORS;
+
+ found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
+ if (found) {
+ hws_definer_copy_sel_ctrl(&ctrl, definer);
+ definer->type = MLX5HWS_DEFINER_TYPE_MATCH;
+ return 0;
+ }
+
+ /* Try to create a full/limited jumbo definer */
+ ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :
+ DW_SELECTORS_MATCH;
+ ctrl.allowed_lim_dw = ctx->caps->full_dw_jumbo_support ? 0 :
+ DW_SELECTORS_LIMITED;
+ ctrl.allowed_bytes = BYTE_SELECTORS;
+
+ found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
+ if (found) {
+ hws_definer_copy_sel_ctrl(&ctrl, definer);
+ definer->type = MLX5HWS_DEFINER_TYPE_JUMBO;
+ return 0;
+ }
+
+ return -E2BIG;
+}
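+
+/* Two definer flavors are attempted: a match definer (6 full DW selectors
+ * plus 8 byte selectors) and, if the layout does not fit, a jumbo definer
+ * that uses either all 9 DW selectors (when full_dw_jumbo_support is set)
+ * or 6 full plus 3 limited DW selectors. -E2BIG means no definer can hold
+ * the requested match.
+ */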
+
+static void
+hws_definer_create_tag_mask(u32 *match_param,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz,
+ u8 *tag)
+{
+ u32 i;
+
+ for (i = 0; i < fc_sz; i++) {
+ if (fc->tag_mask_set)
+ fc->tag_mask_set(fc, match_param, tag);
+ else
+ fc->tag_set(fc, match_param, tag);
+ fc++;
+ }
+}
+
+void mlx5hws_definer_create_tag(u32 *match_param,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz,
+ u8 *tag)
+{
+ u32 i;
+
+ for (i = 0; i < fc_sz; i++) {
+ fc->tag_set(fc, match_param, tag);
+ fc++;
+ }
+}
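+
+/* Minimal usage sketch (hypothetical variable names): once
+ * mlx5hws_definer_mt_init() has populated mt->fc and bound it to
+ * mt->definer, a rule tag is built with
+ *
+ *	u8 tag[MLX5HWS_JUMBO_TAG_SZ] = {};
+ *
+ *	mlx5hws_definer_create_tag(rule_match_param, mt->fc, mt->fc_sz, tag);
+ *
+ * where each fc entry copies its masked source bits from the match
+ * parameters into the definer-selected bytes of the tag.
+ */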
+
+int mlx5hws_definer_get_id(struct mlx5hws_definer *definer)
+{
+ return definer->obj_id;
+}
+
+int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
+ struct mlx5hws_definer *definer_b)
+{
+ int i;
+
+ /* Future: Optimize by comparing selectors with valid mask only */
+ for (i = 0; i < BYTE_SELECTORS; i++)
+ if (definer_a->byte_selector[i] != definer_b->byte_selector[i])
+ return 1;
+
+ for (i = 0; i < DW_SELECTORS; i++)
+ if (definer_a->dw_selector[i] != definer_b->dw_selector[i])
+ return 1;
+
+ for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++)
+ if (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i])
+ return 1;
+
+ return 0;
+}
+
+int
+mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_definer *match_definer)
+{
+ u8 *match_hl;
+ int ret;
+
+ /* Union header-layout (hl) is used for creating a single definer
+ * field layout used with different bitmasks for hash and match.
+ */
+ match_hl = kzalloc(MLX5_ST_SZ_BYTES(definer_hl), GFP_KERNEL);
+ if (!match_hl)
+ return -ENOMEM;
+
+ /* Convert all mt items to header layout (hl)
+ * and allocate the match and range field copy array (fc & fcr).
+ */
+ ret = hws_definer_conv_match_params_to_hl(ctx, mt, match_hl);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to convert items to header layout\n");
+ goto free_match_hl;
+ }
+
+ /* Find the match definer layout for header layout match union */
+ ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl);
+ if (ret) {
+ if (ret == -E2BIG)
+ mlx5hws_dbg(ctx,
+ "Failed to create match definer from header layout - E2BIG\n");
+ else
+ mlx5hws_err(ctx,
+ "Failed to create match definer from header layout (%d)\n",
+ ret);
+ goto free_fc;
+ }
+
+ kfree(match_hl);
+ return 0;
+
+free_fc:
+ kfree(mt->fc);
+free_match_hl:
+ kfree(match_hl);
+ return ret;
+}
+
+int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache)
+{
+ struct mlx5hws_definer_cache *new_cache;
+
+ new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
+ if (!new_cache)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&new_cache->list_head);
+ *cache = new_cache;
+
+ return 0;
+}
+
+void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache)
+{
+ kfree(cache);
+}
+
+int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer)
+{
+ struct mlx5hws_definer_cache *cache = ctx->definer_cache;
+ struct mlx5hws_cmd_definer_create_attr def_attr = {0};
+ struct mlx5hws_definer_cache_item *cached_definer;
+ u32 obj_id;
+ int ret;
+
+ /* Search definer cache for requested definer */
+ list_for_each_entry(cached_definer, &cache->list_head, list_node) {
+ if (mlx5hws_definer_compare(&cached_definer->definer, definer))
+ continue;
+
+ /* Reuse definer and set LRU (move to be first in the list) */
+ list_del_init(&cached_definer->list_node);
+ list_add(&cached_definer->list_node, &cache->list_head);
+ cached_definer->refcount++;
+ return cached_definer->definer.obj_id;
+ }
+
+ /* Allocate and create definer based on the bitmask tag */
+ def_attr.match_mask = definer->mask.jumbo;
+ def_attr.dw_selector = definer->dw_selector;
+ def_attr.byte_selector = definer->byte_selector;
+
+ ret = mlx5hws_cmd_definer_create(ctx->mdev, &def_attr, &obj_id);
+ if (ret)
+ return -1;
+
+ cached_definer = kzalloc(sizeof(*cached_definer), GFP_KERNEL);
+ if (!cached_definer)
+ goto free_definer_obj;
+
+ memcpy(&cached_definer->definer, definer, sizeof(*definer));
+ cached_definer->definer.obj_id = obj_id;
+ cached_definer->refcount = 1;
+ list_add(&cached_definer->list_node, &cache->list_head);
+
+ return obj_id;
+
+free_definer_obj:
+ mlx5hws_cmd_definer_destroy(ctx->mdev, obj_id);
+ return -1;
+}
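+
+/* The definer cache is keyed by the full selector set and mask (see
+ * mlx5hws_definer_compare()) and kept with the most recently used entry at
+ * the head: a hit moves the entry to the list head and bumps its refcount,
+ * so identical definers requested by different matchers share a single
+ * firmware object.
+ */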
+
+static void
+hws_definer_put_obj(struct mlx5hws_context *ctx, u32 obj_id)
+{
+ struct mlx5hws_definer_cache_item *cached_definer;
+
+ list_for_each_entry(cached_definer, &ctx->definer_cache->list_head, list_node) {
+ if (cached_definer->definer.obj_id != obj_id)
+ continue;
+
+ /* Object found */
+ if (--cached_definer->refcount)
+ return;
+
+ list_del_init(&cached_definer->list_node);
+ mlx5hws_cmd_definer_destroy(ctx->mdev, cached_definer->definer.obj_id);
+ kfree(cached_definer);
+ return;
+ }
+
+ /* Programming error, object must be part of cache */
+ pr_warn("HWS: failed putting definer object\n");
+}
+
+static struct mlx5hws_definer *
+hws_definer_alloc(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer_fc *fc,
+ int fc_sz,
+ u32 *match_param,
+ struct mlx5hws_definer *layout,
+ bool bind_fc)
+{
+ struct mlx5hws_definer *definer;
+ int ret;
+
+ definer = kmemdup(layout, sizeof(*definer), GFP_KERNEL);
+ if (!definer)
+ return NULL;
+
+ /* Align field copy array based on given layout */
+ if (bind_fc) {
+ ret = hws_definer_fc_bind(definer, fc, fc_sz);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to bind field copy to definer\n");
+ goto free_definer;
+ }
+ }
+
+ /* Create the tag mask used for definer creation */
+ hws_definer_create_tag_mask(match_param, fc, fc_sz, definer->mask.jumbo);
+
+ ret = mlx5hws_definer_get_obj(ctx, definer);
+ if (ret < 0)
+ goto free_definer;
+
+ definer->obj_id = ret;
+ return definer;
+
+free_definer:
+ kfree(definer);
+ return NULL;
+}
+
+void mlx5hws_definer_free(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer)
+{
+ hws_definer_put_obj(ctx, definer->obj_id);
+ kfree(definer);
+}
+
+static int
+hws_definer_mt_match_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_definer *match_layout)
+{
+ /* Create mandatory match definer */
+ mt->definer = hws_definer_alloc(ctx,
+ mt->fc,
+ mt->fc_sz,
+ mt->match_param,
+ match_layout,
+ true);
+ if (!mt->definer) {
+ mlx5hws_err(ctx, "Failed to create match definer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+hws_definer_mt_match_uninit(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt)
+{
+ mlx5hws_definer_free(ctx, mt->definer);
+}
+
+int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt)
+{
+ struct mlx5hws_definer match_layout = {0};
+ int ret;
+
+ ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
+ return ret;
+ }
+
+ /* Calculate definers needed for exact match */
+ ret = hws_definer_mt_match_init(ctx, mt, &match_layout);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to init match definers\n");
+ goto free_fc;
+ }
+
+ return 0;
+
+free_fc:
+ kfree(mt->fc);
+ return ret;
+}
+
+void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt)
+{
+ hws_definer_mt_match_uninit(ctx, mt);
+ kfree(mt->fc);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h
new file mode 100644
index 000000000000..2f6a7df4021c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_DEFINER_H_
+#define MLX5HWS_DEFINER_H_
+
+/* Max available selectors */
+#define DW_SELECTORS 9
+#define BYTE_SELECTORS 8
+
+/* Selectors based on match TAG */
+#define DW_SELECTORS_MATCH 6
+#define DW_SELECTORS_LIMITED 3
+
+/* Selectors based on range TAG */
+#define DW_SELECTORS_RANGE 2
+#define BYTE_SELECTORS_RANGE 8
+
+#define HWS_NUM_OF_FLEX_PARSERS 8
+
+enum mlx5hws_definer_fname {
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O,
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I,
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O,
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I,
+ MLX5HWS_DEFINER_FNAME_ETH_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_ETH_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_CFI_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_CFI_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_ID_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_ID_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_I,
+ MLX5HWS_DEFINER_FNAME_IPV4_IHL_O,
+ MLX5HWS_DEFINER_FNAME_IPV4_IHL_I,
+ MLX5HWS_DEFINER_FNAME_IP_DSCP_O,
+ MLX5HWS_DEFINER_FNAME_IP_DSCP_I,
+ MLX5HWS_DEFINER_FNAME_IP_ECN_O,
+ MLX5HWS_DEFINER_FNAME_IP_ECN_I,
+ MLX5HWS_DEFINER_FNAME_IP_TTL_O,
+ MLX5HWS_DEFINER_FNAME_IP_TTL_I,
+ MLX5HWS_DEFINER_FNAME_IPV4_DST_O,
+ MLX5HWS_DEFINER_FNAME_IPV4_DST_I,
+ MLX5HWS_DEFINER_FNAME_IPV4_SRC_O,
+ MLX5HWS_DEFINER_FNAME_IPV4_SRC_I,
+ MLX5HWS_DEFINER_FNAME_IP_VERSION_O,
+ MLX5HWS_DEFINER_FNAME_IP_VERSION_I,
+ MLX5HWS_DEFINER_FNAME_IP_FRAG_O,
+ MLX5HWS_DEFINER_FNAME_IP_FRAG_I,
+ MLX5HWS_DEFINER_FNAME_IP_LEN_O,
+ MLX5HWS_DEFINER_FNAME_IP_LEN_I,
+ MLX5HWS_DEFINER_FNAME_IP_TOS_O,
+ MLX5HWS_DEFINER_FNAME_IP_TOS_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I,
+ MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_O,
+ MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_I,
+ MLX5HWS_DEFINER_FNAME_L4_SPORT_O,
+ MLX5HWS_DEFINER_FNAME_L4_SPORT_I,
+ MLX5HWS_DEFINER_FNAME_L4_DPORT_O,
+ MLX5HWS_DEFINER_FNAME_L4_DPORT_I,
+ MLX5HWS_DEFINER_FNAME_TCP_FLAGS_I,
+ MLX5HWS_DEFINER_FNAME_TCP_FLAGS_O,
+ MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM,
+ MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM,
+ MLX5HWS_DEFINER_FNAME_GTP_TEID,
+ MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE,
+ MLX5HWS_DEFINER_FNAME_GTP_EXT_FLAG,
+ MLX5HWS_DEFINER_FNAME_GTP_NEXT_EXT_HDR,
+ MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_PDU,
+ MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_QFI,
+ MLX5HWS_DEFINER_FNAME_GTPU_DW0,
+ MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0,
+ MLX5HWS_DEFINER_FNAME_GTPU_DW2,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7,
+ MLX5HWS_DEFINER_FNAME_VPORT_REG_C_0,
+ MLX5HWS_DEFINER_FNAME_VXLAN_FLAGS,
+ MLX5HWS_DEFINER_FNAME_VXLAN_VNI,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD0,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD1,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OAM,
+ MLX5HWS_DEFINER_FNAME_GENEVE_PROTO,
+ MLX5HWS_DEFINER_FNAME_GENEVE_VNI,
+ MLX5HWS_DEFINER_FNAME_SOURCE_QP,
+ MLX5HWS_DEFINER_FNAME_SOURCE_GVMI,
+ MLX5HWS_DEFINER_FNAME_REG_0,
+ MLX5HWS_DEFINER_FNAME_REG_1,
+ MLX5HWS_DEFINER_FNAME_REG_2,
+ MLX5HWS_DEFINER_FNAME_REG_3,
+ MLX5HWS_DEFINER_FNAME_REG_4,
+ MLX5HWS_DEFINER_FNAME_REG_5,
+ MLX5HWS_DEFINER_FNAME_REG_6,
+ MLX5HWS_DEFINER_FNAME_REG_7,
+ MLX5HWS_DEFINER_FNAME_REG_8,
+ MLX5HWS_DEFINER_FNAME_REG_9,
+ MLX5HWS_DEFINER_FNAME_REG_10,
+ MLX5HWS_DEFINER_FNAME_REG_11,
+ MLX5HWS_DEFINER_FNAME_REG_A,
+ MLX5HWS_DEFINER_FNAME_REG_B,
+ MLX5HWS_DEFINER_FNAME_GRE_KEY_PRESENT,
+ MLX5HWS_DEFINER_FNAME_GRE_C,
+ MLX5HWS_DEFINER_FNAME_GRE_K,
+ MLX5HWS_DEFINER_FNAME_GRE_S,
+ MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL,
+ MLX5HWS_DEFINER_FNAME_GRE_OPT_KEY,
+ MLX5HWS_DEFINER_FNAME_GRE_OPT_SEQ,
+ MLX5HWS_DEFINER_FNAME_GRE_OPT_CHECKSUM,
+ MLX5HWS_DEFINER_FNAME_INTEGRITY_O,
+ MLX5HWS_DEFINER_FNAME_INTEGRITY_I,
+ MLX5HWS_DEFINER_FNAME_ICMP_DW1,
+ MLX5HWS_DEFINER_FNAME_ICMP_DW2,
+ MLX5HWS_DEFINER_FNAME_ICMP_DW3,
+ MLX5HWS_DEFINER_FNAME_IPSEC_SPI,
+ MLX5HWS_DEFINER_FNAME_IPSEC_SEQUENCE_NUMBER,
+ MLX5HWS_DEFINER_FNAME_IPSEC_SYNDROME,
+ MLX5HWS_DEFINER_FNAME_MPLS0_O,
+ MLX5HWS_DEFINER_FNAME_MPLS1_O,
+ MLX5HWS_DEFINER_FNAME_MPLS2_O,
+ MLX5HWS_DEFINER_FNAME_MPLS3_O,
+ MLX5HWS_DEFINER_FNAME_MPLS4_O,
+ MLX5HWS_DEFINER_FNAME_MPLS0_I,
+ MLX5HWS_DEFINER_FNAME_MPLS1_I,
+ MLX5HWS_DEFINER_FNAME_MPLS2_I,
+ MLX5HWS_DEFINER_FNAME_MPLS3_I,
+ MLX5HWS_DEFINER_FNAME_MPLS4_I,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_I,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_0,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_1,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_2,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_3,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_4,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_5,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_6,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_7,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_0,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_1,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_2,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_3,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_4,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_5,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_6,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_7,
+ MLX5HWS_DEFINER_FNAME_IB_L4_OPCODE,
+ MLX5HWS_DEFINER_FNAME_IB_L4_QPN,
+ MLX5HWS_DEFINER_FNAME_IB_L4_A,
+ MLX5HWS_DEFINER_FNAME_RANDOM_NUM,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L2_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L2_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L3_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L3_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_I,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_0,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_1,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_2,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_3,
+ MLX5HWS_DEFINER_FNAME_MAX,
+};
+
+enum mlx5hws_definer_match_criteria {
+ MLX5HWS_DEFINER_MATCH_CRITERIA_EMPTY = 0,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER = 1 << 0,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC = 1 << 1,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_INNER = 1 << 2,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2 = 1 << 3,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3 = 1 << 4,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4 = 1 << 5,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5 = 1 << 6,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6 = 1 << 7,
+};
+
+enum mlx5hws_definer_type {
+ MLX5HWS_DEFINER_TYPE_MATCH,
+ MLX5HWS_DEFINER_TYPE_JUMBO,
+};
+
+enum mlx5hws_definer_match_flag {
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE = 1 << 0,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE = 1 << 1,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU = 1 << 2,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE = 1 << 3,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN = 1 << 4,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1 = 1 << 5,
+
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY = 1 << 6,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2 = 1 << 7,
+
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE = 1 << 8,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP = 1 << 9,
+
+ MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 = 1 << 10,
+ MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 = 1 << 11,
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_O = 1 << 12,
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_I = 1 << 13,
+};
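+
+/* Several of these flags are mutually exclusive because their protocols
+ * occupy the same tunnel_header DWs; hws_definer_check_match_flags() in
+ * mlx5hws_definer.c rejects conflicting combinations.
+ */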
+
+struct mlx5hws_definer_fc {
+ struct mlx5hws_context *ctx;
+ /* Source */
+ u32 s_byte_off;
+ int s_bit_off;
+ u32 s_bit_mask;
+ /* Destination */
+ u32 byte_off;
+ int bit_off;
+ u32 bit_mask;
+ enum mlx5hws_definer_fname fname;
+ void (*tag_set)(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag);
+ void (*tag_mask_set)(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag);
+};
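+
+/* A field copy (fc) describes one masked copy from the match parameter
+ * buffer (s_byte_off/s_bit_off/s_bit_mask) into the rule tag
+ * (byte_off/bit_off/bit_mask). tag_set writes the value; tag_mask_set,
+ * when present, writes the mask used while creating the definer object.
+ */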
+
+struct mlx5_ifc_definer_hl_eth_l2_bits {
+ u8 dmac_47_16[0x20];
+ u8 dmac_15_0[0x10];
+ u8 l3_ethertype[0x10];
+ u8 reserved_at_40[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 ip_fragmented[0x1];
+ u8 qp_type[0x2];
+ u8 encap_type[0x2];
+ u8 port_number[0x2];
+ u8 l3_type[0x2];
+ u8 l4_type_bwc[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_id[0xc];
+ u8 l4_type[0x4];
+ u8 reserved_at_64[0x2];
+ u8 ipsec_layer[0x2];
+ u8 l2_type[0x2];
+ u8 force_lb[0x1];
+ u8 l2_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 l4_ok[0x1];
+ u8 second_vlan_qualifier[0x2];
+ u8 second_priority[0x3];
+ u8 second_cfi[0x1];
+ u8 second_vlan_id[0xc];
+};
+
+struct mlx5_ifc_definer_hl_eth_l2_src_bits {
+ u8 smac_47_16[0x20];
+ u8 smac_15_0[0x10];
+ u8 loopback_syndrome[0x8];
+ u8 l3_type[0x2];
+ u8 l4_type_bwc[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 ip_fragmented[0x1];
+ u8 functional_lb[0x1];
+};
+
+struct mlx5_ifc_definer_hl_ib_l2_bits {
+ u8 sx_sniffer[0x1];
+ u8 force_lb[0x1];
+ u8 functional_lb[0x1];
+ u8 reserved_at_3[0x3];
+ u8 port_number[0x2];
+ u8 sl[0x4];
+ u8 qp_type[0x2];
+ u8 lnh[0x2];
+ u8 dlid[0x10];
+ u8 vl[0x4];
+ u8 lrh_packet_length[0xc];
+ u8 slid[0x10];
+};
+
+struct mlx5_ifc_definer_hl_eth_l3_bits {
+ u8 ip_version[0x4];
+ u8 ihl[0x4];
+ union {
+ u8 tos[0x8];
+ struct {
+ u8 dscp[0x6];
+ u8 ecn[0x2];
+ };
+ };
+ u8 time_to_live_hop_limit[0x8];
+ u8 protocol_next_header[0x8];
+ u8 identification[0x10];
+ union {
+ u8 ipv4_frag[0x10];
+ struct {
+ u8 flags[0x3];
+ u8 fragment_offset[0xd];
+ };
+ };
+ u8 ipv4_total_length[0x10];
+ u8 checksum[0x10];
+ u8 reserved_at_60[0xc];
+ u8 flow_label[0x14];
+ u8 packet_length[0x10];
+ u8 ipv6_payload_length[0x10];
+};
+
+struct mlx5_ifc_definer_hl_eth_l4_bits {
+ u8 source_port[0x10];
+ u8 destination_port[0x10];
+ u8 data_offset[0x4];
+ u8 l4_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 ip_fragmented[0x1];
+ u8 tcp_ns[0x1];
+ union {
+ u8 tcp_flags[0x8];
+ struct {
+ u8 tcp_cwr[0x1];
+ u8 tcp_ece[0x1];
+ u8 tcp_urg[0x1];
+ u8 tcp_ack[0x1];
+ u8 tcp_psh[0x1];
+ u8 tcp_rst[0x1];
+ u8 tcp_syn[0x1];
+ u8 tcp_fin[0x1];
+ };
+ };
+ u8 first_fragment[0x1];
+ u8 reserved_at_31[0xf];
+};
+
+struct mlx5_ifc_definer_hl_src_qp_gvmi_bits {
+ u8 loopback_syndrome[0x8];
+ u8 l3_type[0x2];
+ u8 l4_type_bwc[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 reserved_at_e[0x1];
+ u8 functional_lb[0x1];
+ u8 source_gvmi[0x10];
+ u8 force_lb[0x1];
+ u8 ip_fragmented[0x1];
+ u8 source_is_requestor[0x1];
+ u8 reserved_at_23[0x5];
+ u8 source_qp[0x18];
+};
+
+struct mlx5_ifc_definer_hl_ib_l4_bits {
+ u8 opcode[0x8];
+ u8 qp[0x18];
+ u8 se[0x1];
+ u8 migreq[0x1];
+ u8 ackreq[0x1];
+ u8 fecn[0x1];
+ u8 becn[0x1];
+ u8 bth[0x1];
+ u8 deth[0x1];
+ u8 dcceth[0x1];
+ u8 reserved_at_28[0x2];
+ u8 pad_count[0x2];
+ u8 tver[0x4];
+ u8 p_key[0x10];
+ u8 reserved_at_40[0x8];
+ u8 deth_source_qp[0x18];
+};
+
+enum mlx5hws_integrity_ok1_bits {
+ MLX5HWS_DEFINER_OKS1_FIRST_L4_OK = 24,
+ MLX5HWS_DEFINER_OKS1_FIRST_L3_OK = 25,
+ MLX5HWS_DEFINER_OKS1_SECOND_L4_OK = 26,
+ MLX5HWS_DEFINER_OKS1_SECOND_L3_OK = 27,
+ MLX5HWS_DEFINER_OKS1_FIRST_L4_CSUM_OK = 28,
+ MLX5HWS_DEFINER_OKS1_FIRST_IPV4_CSUM_OK = 29,
+ MLX5HWS_DEFINER_OKS1_SECOND_L4_CSUM_OK = 30,
+ MLX5HWS_DEFINER_OKS1_SECOND_IPV4_CSUM_OK = 31,
+};
+
+struct mlx5_ifc_definer_hl_oks1_bits {
+ union {
+ u8 oks1_bits[0x20];
+ struct {
+ u8 second_ipv4_checksum_ok[0x1];
+ u8 second_l4_checksum_ok[0x1];
+ u8 first_ipv4_checksum_ok[0x1];
+ u8 first_l4_checksum_ok[0x1];
+ u8 second_l3_ok[0x1];
+ u8 second_l4_ok[0x1];
+ u8 first_l3_ok[0x1];
+ u8 first_l4_ok[0x1];
+ u8 flex_parser7_steering_ok[0x1];
+ u8 flex_parser6_steering_ok[0x1];
+ u8 flex_parser5_steering_ok[0x1];
+ u8 flex_parser4_steering_ok[0x1];
+ u8 flex_parser3_steering_ok[0x1];
+ u8 flex_parser2_steering_ok[0x1];
+ u8 flex_parser1_steering_ok[0x1];
+ u8 flex_parser0_steering_ok[0x1];
+ u8 second_ipv6_extension_header_vld[0x1];
+ u8 first_ipv6_extension_header_vld[0x1];
+ u8 l3_tunneling_ok[0x1];
+ u8 l2_tunneling_ok[0x1];
+ u8 second_tcp_ok[0x1];
+ u8 second_udp_ok[0x1];
+ u8 second_ipv4_ok[0x1];
+ u8 second_ipv6_ok[0x1];
+ u8 second_l2_ok[0x1];
+ u8 vxlan_ok[0x1];
+ u8 gre_ok[0x1];
+ u8 first_tcp_ok[0x1];
+ u8 first_udp_ok[0x1];
+ u8 first_ipv4_ok[0x1];
+ u8 first_ipv6_ok[0x1];
+ u8 first_l2_ok[0x1];
+ };
+ };
+};
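The enum above and the struct below describe the same 32-bit dword: with the usual mlx5_ifc convention that the first field of a dword occupies the most significant bits, second_ipv4_checksum_ok sits at bit 31 and first_l4_ok at bit 24, matching the MLX5HWS_DEFINER_OKS1_* values. A minimal user-space sketch of building an oks1 mask from those positions (enum names abbreviated here):

#include <stdio.h>

enum { FIRST_L4_OK = 24, FIRST_L3_OK = 25, SECOND_IPV4_CSUM_OK = 31 };

int main(void)
{
	unsigned int oks1 = 0;

	oks1 |= 1u << FIRST_L4_OK;         /* first_l4_ok field */
	oks1 |= 1u << FIRST_L3_OK;         /* first_l3_ok field */
	oks1 |= 1u << SECOND_IPV4_CSUM_OK; /* second_ipv4_checksum_ok field */
	printf("oks1 mask = 0x%08x\n", oks1); /* 0x83000000 */
	return 0;
}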
+
+struct mlx5_ifc_definer_hl_oks2_bits {
+ u8 reserved_at_0[0xa];
+ u8 second_mpls_ok[0x1];
+ u8 second_mpls4_s_bit[0x1];
+ u8 second_mpls4_qualifier[0x1];
+ u8 second_mpls3_s_bit[0x1];
+ u8 second_mpls3_qualifier[0x1];
+ u8 second_mpls2_s_bit[0x1];
+ u8 second_mpls2_qualifier[0x1];
+ u8 second_mpls1_s_bit[0x1];
+ u8 second_mpls1_qualifier[0x1];
+ u8 second_mpls0_s_bit[0x1];
+ u8 second_mpls0_qualifier[0x1];
+ u8 first_mpls_ok[0x1];
+ u8 first_mpls4_s_bit[0x1];
+ u8 first_mpls4_qualifier[0x1];
+ u8 first_mpls3_s_bit[0x1];
+ u8 first_mpls3_qualifier[0x1];
+ u8 first_mpls2_s_bit[0x1];
+ u8 first_mpls2_qualifier[0x1];
+ u8 first_mpls1_s_bit[0x1];
+ u8 first_mpls1_qualifier[0x1];
+ u8 first_mpls0_s_bit[0x1];
+ u8 first_mpls0_qualifier[0x1];
+};
+
+struct mlx5_ifc_definer_hl_voq_bits {
+ u8 reserved_at_0[0x18];
+ u8 ecn_ok[0x1];
+ u8 congestion[0x1];
+ u8 profile[0x2];
+ u8 internal_prio[0x4];
+};
+
+struct mlx5_ifc_definer_hl_ipv4_src_dst_bits {
+ u8 source_address[0x20];
+ u8 destination_address[0x20];
+};
+
+struct mlx5_ifc_definer_hl_random_number_bits {
+ u8 random_number[0x10];
+ u8 reserved[0x10];
+};
+
+struct mlx5_ifc_definer_hl_ipv6_addr_bits {
+ u8 ipv6_address_127_96[0x20];
+ u8 ipv6_address_95_64[0x20];
+ u8 ipv6_address_63_32[0x20];
+ u8 ipv6_address_31_0[0x20];
+};
+
+struct mlx5_ifc_definer_tcp_icmp_header_bits {
+ union {
+ struct {
+ u8 icmp_dw1[0x20];
+ u8 icmp_dw2[0x20];
+ u8 icmp_dw3[0x20];
+ };
+ struct {
+ u8 tcp_seq[0x20];
+ u8 tcp_ack[0x20];
+ u8 tcp_win_urg[0x20];
+ };
+ };
+};
+
+struct mlx5_ifc_definer_hl_tunnel_header_bits {
+ u8 tunnel_header_0[0x20];
+ u8 tunnel_header_1[0x20];
+ u8 tunnel_header_2[0x20];
+ u8 tunnel_header_3[0x20];
+};
+
+struct mlx5_ifc_definer_hl_ipsec_bits {
+ u8 spi[0x20];
+ u8 sequence_number[0x20];
+ u8 reserved[0x10];
+ u8 ipsec_syndrome[0x8];
+ u8 next_header[0x8];
+};
+
+struct mlx5_ifc_definer_hl_metadata_bits {
+ u8 metadata_to_cqe[0x20];
+ u8 general_purpose[0x20];
+ u8 acomulated_hash[0x20];
+};
+
+struct mlx5_ifc_definer_hl_flex_parser_bits {
+ u8 flex_parser_7[0x20];
+ u8 flex_parser_6[0x20];
+ u8 flex_parser_5[0x20];
+ u8 flex_parser_4[0x20];
+ u8 flex_parser_3[0x20];
+ u8 flex_parser_2[0x20];
+ u8 flex_parser_1[0x20];
+ u8 flex_parser_0[0x20];
+};
+
+struct mlx5_ifc_definer_hl_registers_bits {
+ u8 register_c_10[0x20];
+ u8 register_c_11[0x20];
+ u8 register_c_8[0x20];
+ u8 register_c_9[0x20];
+ u8 register_c_6[0x20];
+ u8 register_c_7[0x20];
+ u8 register_c_4[0x20];
+ u8 register_c_5[0x20];
+ u8 register_c_2[0x20];
+ u8 register_c_3[0x20];
+ u8 register_c_0[0x20];
+ u8 register_c_1[0x20];
+};
+
+struct mlx5_ifc_definer_hl_mpls_bits {
+ u8 mpls0_label[0x20];
+ u8 mpls1_label[0x20];
+ u8 mpls2_label[0x20];
+ u8 mpls3_label[0x20];
+ u8 mpls4_label[0x20];
+};
+
+struct mlx5_ifc_definer_hl_bits {
+ struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_outer;
+ struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_inner;
+ struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_outer;
+ struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_inner;
+ struct mlx5_ifc_definer_hl_ib_l2_bits ib_l2;
+ struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_outer;
+ struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_inner;
+ struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_outer;
+ struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_inner;
+ struct mlx5_ifc_definer_hl_src_qp_gvmi_bits source_qp_gvmi;
+ struct mlx5_ifc_definer_hl_ib_l4_bits ib_l4;
+ struct mlx5_ifc_definer_hl_oks1_bits oks1;
+ struct mlx5_ifc_definer_hl_oks2_bits oks2;
+ struct mlx5_ifc_definer_hl_voq_bits voq;
+ u8 reserved_at_480[0x380];
+ struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_outer;
+ struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_inner;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_outer;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_inner;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_outer;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_inner;
+ u8 unsupported_dest_ib_l3[0x80];
+ u8 unsupported_source_ib_l3[0x80];
+ u8 unsupported_udp_misc_outer[0x20];
+ u8 unsupported_udp_misc_inner[0x20];
+ struct mlx5_ifc_definer_tcp_icmp_header_bits tcp_icmp;
+ struct mlx5_ifc_definer_hl_tunnel_header_bits tunnel_header;
+ struct mlx5_ifc_definer_hl_mpls_bits mpls_outer;
+ struct mlx5_ifc_definer_hl_mpls_bits mpls_inner;
+ u8 unsupported_config_headers_outer[0x80];
+ u8 unsupported_config_headers_inner[0x80];
+ struct mlx5_ifc_definer_hl_random_number_bits random_number;
+ struct mlx5_ifc_definer_hl_ipsec_bits ipsec;
+ struct mlx5_ifc_definer_hl_metadata_bits metadata;
+ u8 unsupported_utc_timestamp[0x40];
+ u8 unsupported_free_running_timestamp[0x40];
+ struct mlx5_ifc_definer_hl_flex_parser_bits flex_parser;
+ struct mlx5_ifc_definer_hl_registers_bits registers;
+ /* Reserved in case the header layout changes on future HW */
+ u8 unsupported_reserved[0xd40];
+};
+
+enum mlx5hws_definer_gtp {
+ MLX5HWS_DEFINER_GTP_EXT_HDR_BIT = 0x04,
+};
+
+struct mlx5_ifc_header_gtp_bits {
+ u8 version[0x3];
+ u8 proto_type[0x1];
+ u8 reserved1[0x1];
+ union {
+ u8 msg_flags[0x3];
+ struct {
+ u8 ext_hdr_flag[0x1];
+ u8 seq_num_flag[0x1];
+ u8 pdu_flag[0x1];
+ };
+ };
+ u8 msg_type[0x8];
+ u8 msg_len[0x8];
+ u8 teid[0x20];
+};
+
+struct mlx5_ifc_header_opt_gtp_bits {
+ u8 seq_num[0x10];
+ u8 pdu_num[0x8];
+ u8 next_ext_hdr_type[0x8];
+};
+
+struct mlx5_ifc_header_gtp_psc_bits {
+ u8 len[0x8];
+ u8 pdu_type[0x4];
+ u8 flags[0x4];
+ u8 qfi[0x8];
+ u8 reserved2[0x8];
+};
+
+struct mlx5_ifc_header_ipv6_vtc_bits {
+ u8 version[0x4];
+ union {
+ u8 tos[0x8];
+ struct {
+ u8 dscp[0x6];
+ u8 ecn[0x2];
+ };
+ };
+ u8 flow_label[0x14];
+};
+
+struct mlx5_ifc_header_ipv6_routing_ext_bits {
+ u8 next_hdr[0x8];
+ u8 hdr_len[0x8];
+ u8 type[0x8];
+ u8 segments_left[0x8];
+ union {
+ u8 flags[0x20];
+ struct {
+ u8 last_entry[0x8];
+ u8 flag[0x8];
+ u8 tag[0x10];
+ };
+ };
+};
+
+struct mlx5_ifc_header_vxlan_bits {
+ u8 flags[0x8];
+ u8 reserved1[0x18];
+ u8 vni[0x18];
+ u8 reserved2[0x8];
+};
+
+struct mlx5_ifc_header_vxlan_gpe_bits {
+ u8 flags[0x8];
+ u8 rsvd0[0x10];
+ u8 protocol[0x8];
+ u8 vni[0x18];
+ u8 rsvd1[0x8];
+};
+
+struct mlx5_ifc_header_gre_bits {
+ union {
+ u8 c_rsvd0_ver[0x10];
+ struct {
+ u8 gre_c_present[0x1];
+ u8 reserved_at_1[0x1];
+ u8 gre_k_present[0x1];
+ u8 gre_s_present[0x1];
+ u8 reserved_at_4[0x9];
+ u8 version[0x3];
+ };
+ };
+ u8 gre_protocol[0x10];
+ u8 checksum[0x10];
+ u8 reserved_at_30[0x10];
+};
+
+struct mlx5_ifc_header_geneve_bits {
+ union {
+ u8 ver_opt_len_o_c_rsvd[0x10];
+ struct {
+ u8 version[0x2];
+ u8 opt_len[0x6];
+ u8 o_flag[0x1];
+ u8 c_flag[0x1];
+ u8 reserved_at_a[0x6];
+ };
+ };
+ u8 protocol_type[0x10];
+ u8 vni[0x18];
+ u8 reserved_at_38[0x8];
+};
+
+struct mlx5_ifc_header_geneve_opt_bits {
+ u8 class[0x10];
+ u8 type[0x8];
+ u8 reserved[0x3];
+ u8 len[0x5];
+};
+
+struct mlx5_ifc_header_icmp_bits {
+ union {
+ u8 icmp_dw1[0x20];
+ struct {
+ u8 type[0x8];
+ u8 code[0x8];
+ u8 cksum[0x10];
+ };
+ };
+ union {
+ u8 icmp_dw2[0x20];
+ struct {
+ u8 ident[0x10];
+ u8 seq_nb[0x10];
+ };
+ };
+};
+
+struct mlx5hws_definer {
+ enum mlx5hws_definer_type type;
+ u8 dw_selector[DW_SELECTORS];
+ u8 byte_selector[BYTE_SELECTORS];
+ struct mlx5hws_rule_match_tag mask;
+ u32 obj_id;
+};
+
+struct mlx5hws_definer_cache {
+ struct list_head list_head;
+};
+
+struct mlx5hws_definer_cache_item {
+ struct mlx5hws_definer definer;
+ u32 refcount;
+ struct list_head list_node;
+};
+
+static inline bool
+mlx5hws_definer_is_jumbo(struct mlx5hws_definer *definer)
+{
+ return (definer->type == MLX5HWS_DEFINER_TYPE_JUMBO);
+}
+
+void mlx5hws_definer_create_tag(u32 *match_param,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz,
+ u8 *tag);
+
+int mlx5hws_definer_get_id(struct mlx5hws_definer *definer);
+
+int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt);
+
+void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt);
+
+int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache);
+
+void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache);
+
+int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
+ struct mlx5hws_definer *definer_b);
+
+int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer);
+
+void mlx5hws_definer_free(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer);
+
+int mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_definer *match_definer);
+
+struct mlx5hws_definer_fc *
+mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ u32 *match_param,
+ int *fc_sz);
+
+#endif /* MLX5HWS_DEFINER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h
new file mode 100644
index 000000000000..5643be1cd5bf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_INTERNAL_H_
+#define MLX5HWS_INTERNAL_H_
+
+#include <linux/mlx5/transobj.h>
+#include <linux/mlx5/vport.h>
+#include "fs_core.h"
+#include "wq.h"
+#include "lib/mlx5.h"
+
+#include "mlx5hws_prm.h"
+#include "mlx5hws.h"
+#include "mlx5hws_pool.h"
+#include "mlx5hws_vport.h"
+#include "mlx5hws_context.h"
+#include "mlx5hws_table.h"
+#include "mlx5hws_send.h"
+#include "mlx5hws_rule.h"
+#include "mlx5hws_cmd.h"
+#include "mlx5hws_action.h"
+#include "mlx5hws_definer.h"
+#include "mlx5hws_matcher.h"
+#include "mlx5hws_debug.h"
+#include "mlx5hws_pat_arg.h"
+#include "mlx5hws_bwc.h"
+#include "mlx5hws_bwc_complex.h"
+
+#define W_SIZE 2
+#define DW_SIZE 4
+#define BITS_IN_BYTE 8
+#define BITS_IN_DW (BITS_IN_BYTE * DW_SIZE)
+
+#define IS_BIT_SET(_value, _bit) ((_value) & (1ULL << (_bit)))
+
+#define mlx5hws_err(ctx, arg...) mlx5_core_err((ctx)->mdev, ##arg)
+#define mlx5hws_info(ctx, arg...) mlx5_core_info((ctx)->mdev, ##arg)
+#define mlx5hws_dbg(ctx, arg...) mlx5_core_dbg((ctx)->mdev, ##arg)
+
+#define MLX5HWS_TABLE_TYPE_BASE 2
+#define MLX5HWS_ACTION_STE_IDX_ANY 0
+
+static inline bool is_mem_zero(const u8 *mem, size_t size)
+{
+ if (unlikely(!size)) {
+ pr_warn("HWS: invalid buffer of size 0 in %s\n", __func__);
+ return true;
+ }
+
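+ /* All-zero iff the first byte is zero and every byte equals its successor */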
+ return (*mem == 0) && memcmp(mem, mem + 1, size - 1) == 0;
+}
+
+static inline unsigned long align(unsigned long val, unsigned long align)
+{
+ return (val + align - 1) & ~(align - 1);
+}
+
+#endif /* MLX5HWS_INTERNAL_H_ */
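Both helpers here are small bit tricks worth spelling out: is_mem_zero() avoids an explicit loop by letting memcmp() compare each byte against its neighbor, and align() rounds up with the classic mask arithmetic, which is only valid when the alignment is a power of two. A user-space sketch of the rounding under that assumption:

#include <assert.h>

/* Same arithmetic as align() above; 'a' must be a power of two */
static unsigned long align_up(unsigned long val, unsigned long a)
{
	return (val + a - 1) & ~(a - 1);
}

int main(void)
{
	assert(align_up(13, 8) == 16);
	assert(align_up(16, 8) == 16); /* already-aligned values are unchanged */
	assert(align_up(1, 64) == 64);
	return 0;
}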
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c
new file mode 100644
index 000000000000..61a1155d4b4f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c
@@ -0,0 +1,1216 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+enum mlx5hws_matcher_rtc_type {
+ HWS_MATCHER_RTC_TYPE_MATCH,
+ HWS_MATCHER_RTC_TYPE_STE_ARRAY,
+ HWS_MATCHER_RTC_TYPE_MAX,
+};
+
+static const char * const mlx5hws_matcher_rtc_type_str[] = {
+ [HWS_MATCHER_RTC_TYPE_MATCH] = "MATCH",
+ [HWS_MATCHER_RTC_TYPE_STE_ARRAY] = "STE_ARRAY",
+ [HWS_MATCHER_RTC_TYPE_MAX] = "UNKNOWN",
+};
+
+static const char *hws_matcher_rtc_type_to_str(enum mlx5hws_matcher_rtc_type rtc_type)
+{
+ if (rtc_type > HWS_MATCHER_RTC_TYPE_MAX)
+ rtc_type = HWS_MATCHER_RTC_TYPE_MAX;
+ return mlx5hws_matcher_rtc_type_str[rtc_type];
+}
+
+static bool hws_matcher_requires_col_tbl(u8 log_num_of_rules)
+{
+ /* Collision table concatenation is done only for large rule tables */
+ return log_num_of_rules > MLX5HWS_MATCHER_ASSURED_RULES_TH;
+}
+
+static u8 hws_matcher_rules_to_tbl_depth(u8 log_num_of_rules)
+{
+ if (hws_matcher_requires_col_tbl(log_num_of_rules))
+ return MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH;
+
+ /* For small rule tables we use a single deep table to assure insertion */
+ return min(log_num_of_rules, MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH);
+}
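Worked through with the constants defined later in mlx5hws_matcher.h (threshold 10, main-table depth 2, collision-table depth 4): a matcher sized for 2^8 rules gets a single table of depth min(8, 4) = 4 and no collision table, while one sized for 2^12 rules gets a shallow depth-2 main table plus a concatenated collision table. A quick user-space sketch of the same decision:

#include <stdio.h>

#define RULES_TH   10 /* MLX5HWS_MATCHER_ASSURED_RULES_TH */
#define MAIN_DEPTH 2  /* MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH */
#define COL_DEPTH  4  /* MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH */

static int rules_to_depth(int log_num_of_rules)
{
	if (log_num_of_rules > RULES_TH) /* a collision table will be added */
		return MAIN_DEPTH;
	return log_num_of_rules < COL_DEPTH ? log_num_of_rules : COL_DEPTH;
}

int main(void)
{
	printf("2^8 rules  -> depth %d\n", rules_to_depth(8));  /* 4 */
	printf("2^12 rules -> depth %d\n", rules_to_depth(12)); /* 2 */
	return 0;
}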
+
+static void hws_matcher_destroy_end_ft(struct mlx5hws_matcher *matcher)
+{
+ mlx5hws_table_destroy_default_ft(matcher->tbl, matcher->end_ft_id);
+}
+
+static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ int ret;
+
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &matcher->end_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to create matcher end flow table\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int hws_matcher_connect(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_matcher *prev = NULL;
+ struct mlx5hws_matcher *next = NULL;
+ struct mlx5hws_matcher *tmp_matcher;
+ int ret;
+
+ /* Find location in matcher list */
+ if (list_empty(&tbl->matchers_list)) {
+ list_add(&matcher->list_node, &tbl->matchers_list);
+ goto connect;
+ }
+
+ list_for_each_entry(tmp_matcher, &tbl->matchers_list, list_node) {
+ if (tmp_matcher->attr.priority > matcher->attr.priority) {
+ next = tmp_matcher;
+ break;
+ }
+ prev = tmp_matcher;
+ }
+
+ if (next)
+ /* insert before next */
+ list_add_tail(&matcher->list_node, &next->list_node);
+ else
+ /* insert after prev */
+ list_add(&matcher->list_node, &prev->list_node);
+
+connect:
+ if (next) {
+ /* Connect to next RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ matcher->end_ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to connect new matcher to next RTC\n");
+ goto remove_from_list;
+ }
+ } else {
+ /* Connect the last matcher to the next miss_tbl if one exists */
+ ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed connect new matcher to miss_tbl\n");
+ goto remove_from_list;
+ }
+ }
+
+ /* Connect to previous FT */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ prev ? prev->end_ft_id : tbl->ft_id,
+ tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to connect new matcher to previous FT\n");
+ goto remove_from_list;
+ }
+
+ /* Reset prev matcher FT default miss (drop refcount) */
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev ? prev->end_ft_id : tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to reset matcher ft default miss\n");
+ goto remove_from_list;
+ }
+
+ if (!prev) {
+ /* Update miss tables of connected tables to point to the new first matcher */
+ ret = mlx5hws_table_update_connected_miss_tables(tbl);
+ if (ret) {
+ mlx5hws_err(ctx, "Fatal error, failed to update connected miss table\n");
+ goto remove_from_list;
+ }
+ }
+
+ return 0;
+
+remove_from_list:
+ list_del_init(&matcher->list_node);
+ return ret;
+}
+
+static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher *next = NULL, *prev = NULL;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ u32 prev_ft_id = tbl->ft_id;
+ int ret;
+
+ if (!list_is_first(&matcher->list_node, &tbl->matchers_list)) {
+ prev = list_prev_entry(matcher, list_node);
+ prev_ft_id = prev->end_ft_id;
+ }
+
+ if (!list_is_last(&matcher->list_node, &tbl->matchers_list))
+ next = list_next_entry(matcher, list_node);
+
+ list_del_init(&matcher->list_node);
+
+ if (next) {
+ /* Connect previous end FT to next RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(tbl->ctx,
+ prev_ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to disconnect matcher\n");
+ goto matcher_reconnect;
+ }
+ } else {
+ ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to disconnect last matcher\n");
+ goto matcher_reconnect;
+ }
+ }
+
+ /* Removing the first matcher, update connected miss tables if any exist */
+ if (prev_ft_id == tbl->ft_id) {
+ ret = mlx5hws_table_update_connected_miss_tables(tbl);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to update connected miss table\n");
+ goto matcher_reconnect;
+ }
+ }
+
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to restore matcher ft default miss\n");
+ goto matcher_reconnect;
+ }
+
+ return 0;
+
+matcher_reconnect:
+ if (list_empty(&tbl->matchers_list) || !prev)
+ list_add(&matcher->list_node, &tbl->matchers_list);
+ else
+ /* insert after prev matcher */
+ list_add(&matcher->list_node, &prev->list_node);
+
+ return ret;
+}
+
+static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+ enum mlx5hws_matcher_rtc_type rtc_type,
+ bool is_mirror)
+{
+ struct mlx5hws_pool_chunk *ste = &matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].ste;
+ enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src;
+ bool is_match_rtc = rtc_type == HWS_MATCHER_RTC_TYPE_MATCH;
+
+ if ((flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT && !is_mirror) ||
+ (flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE && is_mirror)) {
+ /* Optimize FDB RTC */
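+ /* The optimized flow source guarantees this RTC is never hit, so allocate the minimum */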
+ rtc_attr->log_size = 0;
+ rtc_attr->log_depth = 0;
+ } else {
+ /* Keep original values */
+ rtc_attr->log_size = is_match_rtc ? matcher->attr.table.sz_row_log : ste->order;
+ rtc_attr->log_depth = is_match_rtc ? matcher->attr.table.sz_col_log : 0;
+ }
+}
+
+static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
+ enum mlx5hws_matcher_rtc_type rtc_type,
+ u8 action_ste_selector)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+ struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
+ struct mlx5hws_match_template *mt = matcher->mt;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ struct mlx5hws_action_default_stc *default_stc;
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_pool *ste_pool, *stc_pool;
+ struct mlx5hws_pool_chunk *ste;
+ u32 *rtc_0_id, *rtc_1_id;
+ u32 obj_id;
+ int ret;
+
+ switch (rtc_type) {
+ case HWS_MATCHER_RTC_TYPE_MATCH:
+ rtc_0_id = &matcher->match_ste.rtc_0_id;
+ rtc_1_id = &matcher->match_ste.rtc_1_id;
+ ste_pool = matcher->match_ste.pool;
+ ste = &matcher->match_ste.ste;
+ ste->order = attr->table.sz_col_log + attr->table.sz_row_log;
+
+ rtc_attr.log_size = attr->table.sz_row_log;
+ rtc_attr.log_depth = attr->table.sz_col_log;
+ rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+ rtc_attr.is_scnd_range = 0;
+ rtc_attr.miss_ft_id = matcher->end_ft_id;
+
+ if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
+ /* The usual Hash Table */
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
+
+ /* The first mt is used since all share the same definer */
+ rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) {
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ rtc_attr.num_hash_definer = 1;
+
+ if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ /* Hash Split Table */
+ rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
+ rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
+ /* Linear Lookup Table */
+ rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR;
+ rtc_attr.match_definer_0 = ctx->caps->linear_match_definer;
+ }
+ }
+
+ /* Match pool requires implicit allocation */
+ ret = mlx5hws_pool_chunk_alloc(ste_pool, ste);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate STE for %s RTC",
+ hws_matcher_rtc_type_to_str(rtc_type));
+ return ret;
+ }
+ break;
+
+ case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
+ action_ste = &matcher->action_ste[action_ste_selector];
+
+ rtc_0_id = &action_ste->rtc_0_id;
+ rtc_1_id = &action_ste->rtc_1_id;
+ ste_pool = action_ste->pool;
+ ste = &action_ste->ste;
+ ste->order = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
+ attr->table.sz_row_log;
+ rtc_attr.log_size = ste->order;
+ rtc_attr.log_depth = 0;
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ /* The action STEs use the default always hit definer */
+ rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
+ rtc_attr.is_frst_jumbo = false;
+ rtc_attr.miss_ft_id = 0;
+ break;
+
+ default:
+ mlx5hws_err(ctx, "HWS Invalid RTC type\n");
+ return -EINVAL;
+ }
+
+ obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+
+ rtc_attr.pd = ctx->pd_num;
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.ste_offset = ste->offset;
+ rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, false);
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, false);
+
+ /* STC is a single resource (obj_id), use any STC for the ID */
+ stc_pool = ctx->stc_pool[tbl->type];
+ default_stc = ctx->common_res[tbl->type].default_stc;
+ obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+ rtc_attr.stc_base = obj_id;
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create matcher RTC of type %s",
+ hws_matcher_rtc_type_to_str(rtc_type));
+ goto free_ste;
+ }
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, true);
+
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+ rtc_attr.stc_base = obj_id;
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, true);
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create peer matcher RTC of type %s",
+ hws_matcher_rtc_type_to_str(rtc_type));
+ goto destroy_rtc_0;
+ }
+ }
+
+ return 0;
+
+destroy_rtc_0:
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id);
+free_ste:
+ if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
+ mlx5hws_pool_chunk_free(ste_pool, ste);
+ return ret;
+}
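In short, the match branch above maps matcher attributes onto RTC modes as follows: plain hash insertion uses update-by-hash with the template's definer; insert-by-index with hash distribution becomes a hash split table (update by offset, access by hash); and insert-by-index with linear distribution becomes a linear lookup table using the FW-provided linear match definer. Action STE_ARRAY RTCs always update by offset against the trivial always-hit definer.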
+
+static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher,
+ enum mlx5hws_matcher_rtc_type rtc_type,
+ u8 action_ste_selector)
+{
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_pool_chunk *ste;
+ struct mlx5hws_pool *ste_pool;
+ u32 rtc_0_id, rtc_1_id;
+
+ switch (rtc_type) {
+ case HWS_MATCHER_RTC_TYPE_MATCH:
+ rtc_0_id = matcher->match_ste.rtc_0_id;
+ rtc_1_id = matcher->match_ste.rtc_1_id;
+ ste_pool = matcher->match_ste.pool;
+ ste = &matcher->match_ste.ste;
+ break;
+ case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
+ action_ste = &matcher->action_ste[action_ste_selector];
+ rtc_0_id = action_ste->rtc_0_id;
+ rtc_1_id = action_ste->rtc_1_id;
+ ste_pool = action_ste->pool;
+ ste = &action_ste->ste;
+ break;
+ default:
+ return;
+ }
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_1_id);
+
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_0_id);
+ if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
+ mlx5hws_pool_chunk_free(ste_pool, ste);
+}
+
+static int
+hws_matcher_check_attr_sz(struct mlx5hws_cmd_query_caps *caps,
+ struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+
+ if (attr->table.sz_col_log > caps->rtc_log_depth_max) {
+ mlx5hws_err(matcher->tbl->ctx, "Matcher depth exceeds limit %d\n",
+ caps->rtc_log_depth_max);
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->table.sz_col_log + attr->table.sz_row_log > caps->ste_alloc_log_max) {
+ mlx5hws_err(matcher->tbl->ctx, "Total matcher size exceeds limit %d\n",
+ caps->ste_alloc_log_max);
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->table.sz_col_log + attr->table.sz_row_log < caps->ste_alloc_log_gran) {
+ mlx5hws_err(matcher->tbl->ctx, "Total matcher size below limit %d\n",
+ caps->ste_alloc_log_gran);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void hws_matcher_set_pool_attr(struct mlx5hws_pool_attr *attr,
+ struct mlx5hws_matcher *matcher)
+{
+ switch (matcher->attr.optimize_flow_src) {
+ case MLX5HWS_MATCHER_FLOW_SRC_VPORT:
+ attr->opt_type = MLX5HWS_POOL_OPTIMIZE_ORIG;
+ break;
+ case MLX5HWS_MATCHER_FLOW_SRC_WIRE:
+ attr->opt_type = MLX5HWS_POOL_OPTIMIZE_MIRROR;
+ break;
+ default:
+ break;
+ }
+}
+
+static int hws_matcher_check_and_process_at(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_action_template *at)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ bool valid;
+ int ret;
+
+ valid = mlx5hws_action_check_combo(ctx, at->action_type_arr, matcher->tbl->type);
+ if (!valid) {
+ mlx5hws_err(ctx, "Invalid combination in action template\n");
+ return -EINVAL;
+ }
+
+ /* Process action template to setters */
+ ret = mlx5hws_action_template_process(at);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to process action template\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hws_matcher_resize_init(struct mlx5hws_matcher *src_matcher)
+{
+ struct mlx5hws_matcher_resize_data *resize_data;
+
+ resize_data = kzalloc(sizeof(*resize_data), GFP_KERNEL);
+ if (!resize_data)
+ return -ENOMEM;
+
+ resize_data->max_stes = src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
+
+ resize_data->action_ste[0].stc = src_matcher->action_ste[0].stc;
+ resize_data->action_ste[0].rtc_0_id = src_matcher->action_ste[0].rtc_0_id;
+ resize_data->action_ste[0].rtc_1_id = src_matcher->action_ste[0].rtc_1_id;
+ resize_data->action_ste[0].pool = src_matcher->action_ste[0].max_stes ?
+ src_matcher->action_ste[0].pool :
+ NULL;
+ resize_data->action_ste[1].stc = src_matcher->action_ste[1].stc;
+ resize_data->action_ste[1].rtc_0_id = src_matcher->action_ste[1].rtc_0_id;
+ resize_data->action_ste[1].rtc_1_id = src_matcher->action_ste[1].rtc_1_id;
+ resize_data->action_ste[1].pool = src_matcher->action_ste[1].max_stes ?
+ src_matcher->action_ste[1].pool :
+ NULL;
+
+ /* Place the new resize data on the dst matcher's list */
+ list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
+
+ /* Move all the previous resized matchers to the dst matcher's list */
+ while (!list_empty(&src_matcher->resize_data)) {
+ resize_data = list_first_entry(&src_matcher->resize_data,
+ struct mlx5hws_matcher_resize_data,
+ list_node);
+ list_del_init(&resize_data->list_node);
+ list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
+ }
+
+ return 0;
+}
+
+static void hws_matcher_resize_uninit(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_resize_data *resize_data;
+
+ if (!mlx5hws_matcher_is_resizable(matcher))
+ return;
+
+ while (!list_empty(&matcher->resize_data)) {
+ resize_data = list_first_entry(&matcher->resize_data,
+ struct mlx5hws_matcher_resize_data,
+ list_node);
+ list_del_init(&resize_data->list_node);
+
+ if (resize_data->max_stes) {
+ mlx5hws_action_free_single_stc(matcher->tbl->ctx,
+ matcher->tbl->type,
+ &resize_data->action_ste[1].stc);
+ mlx5hws_action_free_single_stc(matcher->tbl->ctx,
+ matcher->tbl->type,
+ &resize_data->action_ste[0].stc);
+
+ if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+ resize_data->action_ste[1].rtc_1_id);
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+ resize_data->action_ste[0].rtc_1_id);
+ }
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+ resize_data->action_ste[1].rtc_0_id);
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+ resize_data->action_ste[0].rtc_0_id);
+ if (resize_data->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].pool) {
+ mlx5hws_pool_destroy(resize_data->action_ste[1].pool);
+ mlx5hws_pool_destroy(resize_data->action_ste[0].pool);
+ }
+ }
+
+ kfree(resize_data);
+ }
+}
+
+static int
+hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_pool_attr pool_attr = {0};
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ action_ste = &matcher->action_ste[action_ste_selector];
+
+ /* Allocate action STE mempool */
+ pool_attr.table_type = tbl->type;
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
+ pool_attr.alloc_log_sz = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
+ matcher->attr.table.sz_row_log;
+ hws_matcher_set_pool_attr(&pool_attr, matcher);
+ action_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!action_ste->pool) {
+ mlx5hws_err(ctx, "Failed to create action ste pool\n");
+ return -EINVAL;
+ }
+
+ /* Allocate action RTC */
+ ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create action RTC\n");
+ goto free_ste_pool;
+ }
+
+ /* Allocate STC for jumps to STE */
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.ste_table.ste = action_ste->ste;
+ stc_attr.ste_table.ste_pool = action_ste->pool;
+ stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl->type,
+ &action_ste->stc);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create action jump to table STC\n");
+ goto free_rtc;
+ }
+
+ return 0;
+
+free_rtc:
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+free_ste_pool:
+ mlx5hws_pool_destroy(action_ste->pool);
+ return ret;
+}
+
+static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
+{
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_table *tbl = matcher->tbl;
+
+ action_ste = &matcher->action_ste[action_ste_selector];
+
+ if (!action_ste->max_stes ||
+ matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION ||
+ mlx5hws_matcher_is_in_resize(matcher))
+ return;
+
+ mlx5hws_action_free_single_stc(tbl->ctx, tbl->type, &action_ste->stc);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+ mlx5hws_pool_destroy(action_ste->pool);
+}
+
+static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u32 required_stes;
+ u8 max_stes = 0;
+ int i, ret;
+
+ if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)
+ return 0;
+
+ for (i = 0; i < matcher->num_of_at; i++) {
+ struct mlx5hws_action_template *at = &matcher->at[i];
+
+ ret = hws_matcher_check_and_process_at(matcher, at);
+ if (ret) {
+ mlx5hws_err(ctx, "Invalid at %d", i);
+ return ret;
+ }
+
+ required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
+ max_stes = max(max_stes, required_stes);
+
+ /* Future: Optimize reparse */
+ }
+
+ /* No additional action STEs are required for this matcher */
+ if (!max_stes)
+ return 0;
+
+ matcher->action_ste[0].max_stes = max_stes;
+ matcher->action_ste[1].max_stes = max_stes;
+
+ ret = hws_matcher_bind_at_idx(matcher, 0);
+ if (ret)
+ return ret;
+
+ ret = hws_matcher_bind_at_idx(matcher, 1);
+ if (ret)
+ goto free_at_0;
+
+ return 0;
+
+free_at_0:
+ hws_matcher_unbind_at_idx(matcher, 0);
+ return ret;
+}
+
+static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher)
+{
+ hws_matcher_unbind_at_idx(matcher, 1);
+ hws_matcher_unbind_at_idx(matcher, 0);
+}
+
+static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ struct mlx5hws_pool_attr pool_attr = {0};
+ int ret;
+
+ /* Calculate match, range and hash definers */
+ if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)) {
+ ret = mlx5hws_definer_mt_init(ctx, matcher->mt);
+ if (ret) {
+ if (ret == -E2BIG)
+ mlx5hws_err(ctx, "Failed to set matcher templates with match definers\n");
+ return ret;
+ }
+ }
+
+ /* Create an STE pool per matcher */
+ pool_attr.table_type = matcher->tbl->type;
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL;
+ pool_attr.alloc_log_sz = matcher->attr.table.sz_col_log +
+ matcher->attr.table.sz_row_log;
+ hws_matcher_set_pool_attr(&pool_attr, matcher);
+
+ matcher->match_ste.pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!matcher->match_ste.pool) {
+ mlx5hws_err(ctx, "Failed to allocate matcher STE pool\n");
+ ret = -EOPNOTSUPP;
+ goto uninit_match_definer;
+ }
+
+ return 0;
+
+uninit_match_definer:
+ if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+ mlx5hws_definer_mt_uninit(ctx, matcher->mt);
+ return ret;
+}
+
+static void hws_matcher_unbind_mt(struct mlx5hws_matcher *matcher)
+{
+ mlx5hws_pool_destroy(matcher->match_ste.pool);
+ if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+ mlx5hws_definer_mt_uninit(matcher->tbl->ctx, matcher->mt);
+}
+
+static int
+hws_matcher_validate_insert_mode(struct mlx5hws_cmd_query_caps *caps,
+ struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+
+ switch (attr->insert_mode) {
+ case MLX5HWS_MATCHER_INSERT_BY_HASH:
+ if (matcher->attr.distribute_mode != MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ mlx5hws_err(ctx, "Invalid matcher distribute mode\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case MLX5HWS_MATCHER_INSERT_BY_INDEX:
+ if (attr->table.sz_col_log) {
+ mlx5hws_err(ctx, "Matcher with INSERT_BY_INDEX supports only Nx1 table size\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ /* Hash Split Table */
+ if (!caps->rtc_hash_split_table) {
+ mlx5hws_err(ctx, "FW doesn't support insert by index and hash distribute\n");
+ return -EOPNOTSUPP;
+ }
+ } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
+ /* Linear Lookup Table */
+ if (!caps->rtc_linear_lookup_table ||
+ !IS_BIT_SET(caps->access_index_mode,
+ MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR)) {
+ mlx5hws_err(ctx, "FW doesn't support insert by index and linear distribute\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->table.sz_row_log > MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX) {
+ mlx5hws_err(ctx, "Matcher with linear distribute: rows exceed limit %d",
+ MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX);
+ return -EOPNOTSUPP;
+ }
+ } else {
+ mlx5hws_err(ctx, "Matcher has unsupported distribute mode\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ mlx5hws_err(ctx, "Matcher has unsupported insert mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps,
+ struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+
+ if (hws_matcher_validate_insert_mode(caps, matcher))
+ return -EOPNOTSUPP;
+
+ if (matcher->tbl->type != MLX5HWS_TABLE_TYPE_FDB && attr->optimize_flow_src) {
+ mlx5hws_err(matcher->tbl->ctx, "NIC domain doesn't support flow_src\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Convert number of rules to the required depth */
+ if (attr->mode == MLX5HWS_MATCHER_RESOURCE_MODE_RULE &&
+ attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH)
+ attr->table.sz_col_log = hws_matcher_rules_to_tbl_depth(attr->rule.num_log);
+
+ matcher->flags |= attr->resizable ? MLX5HWS_MATCHER_FLAGS_RESIZABLE : 0;
+
+ return hws_matcher_check_attr_sz(caps, matcher);
+}
+
+static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
+{
+ int ret;
+
+ /* Select and create the definers for current matcher */
+ ret = hws_matcher_bind_mt(matcher);
+ if (ret)
+ return ret;
+
+ /* Calculate and verify action combination */
+ ret = hws_matcher_bind_at(matcher);
+ if (ret)
+ goto unbind_mt;
+
+ /* Create matcher end flow table anchor */
+ ret = hws_matcher_create_end_ft(matcher);
+ if (ret)
+ goto unbind_at;
+
+ /* Allocate the RTC for the new matcher */
+ ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ if (ret)
+ goto destroy_end_ft;
+
+ /* Connect the matcher to the matcher list */
+ ret = hws_matcher_connect(matcher);
+ if (ret)
+ goto destroy_rtc;
+
+ return 0;
+
+destroy_rtc:
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+destroy_end_ft:
+ hws_matcher_destroy_end_ft(matcher);
+unbind_at:
+ hws_matcher_unbind_at(matcher);
+unbind_mt:
+ hws_matcher_unbind_mt(matcher);
+ return ret;
+}
+
+static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher)
+{
+ hws_matcher_resize_uninit(matcher);
+ hws_matcher_disconnect(matcher);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ hws_matcher_destroy_end_ft(matcher);
+ hws_matcher_unbind_at(matcher);
+ hws_matcher_unbind_mt(matcher);
+}
+
+static int
+hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ struct mlx5hws_matcher *col_matcher;
+ int ret;
+
+ if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE ||
+ matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX)
+ return 0;
+
+ if (!hws_matcher_requires_col_tbl(matcher->attr.rule.num_log))
+ return 0;
+
+ col_matcher = kzalloc(sizeof(*col_matcher), GFP_KERNEL);
+ if (!col_matcher)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&col_matcher->resize_data);
+
+ col_matcher->tbl = matcher->tbl;
+ col_matcher->mt = matcher->mt;
+ col_matcher->at = matcher->at;
+ col_matcher->num_of_at = matcher->num_of_at;
+ col_matcher->num_of_mt = matcher->num_of_mt;
+ col_matcher->attr.priority = matcher->attr.priority;
+ col_matcher->flags = matcher->flags;
+ col_matcher->flags |= MLX5HWS_MATCHER_FLAGS_COLLISION;
+ col_matcher->attr.mode = MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE;
+ col_matcher->attr.optimize_flow_src = matcher->attr.optimize_flow_src;
+ col_matcher->attr.table.sz_row_log = matcher->attr.rule.num_log;
+ col_matcher->attr.table.sz_col_log = MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH;
+ if (col_matcher->attr.table.sz_row_log > MLX5HWS_MATCHER_ASSURED_ROW_RATIO)
+ col_matcher->attr.table.sz_row_log -= MLX5HWS_MATCHER_ASSURED_ROW_RATIO;
+
+ col_matcher->attr.max_num_of_at_attach = matcher->attr.max_num_of_at_attach;
+
+ ret = hws_matcher_process_attr(ctx->caps, col_matcher);
+ if (ret)
+ goto free_col_matcher;
+
+ ret = hws_matcher_create_and_connect(col_matcher);
+ if (ret)
+ goto free_col_matcher;
+
+ matcher->col_matcher = col_matcher;
+
+ return 0;
+
+free_col_matcher:
+ kfree(col_matcher);
+ mlx5hws_err(ctx, "Failed to create assured collision matcher\n");
+ return ret;
+}
+
+static void
+hws_matcher_destroy_col_matcher(struct mlx5hws_matcher *matcher)
+{
+ if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE ||
+ matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX)
+ return;
+
+ if (matcher->col_matcher) {
+ hws_matcher_destroy_and_disconnect(matcher->col_matcher);
+ kfree(matcher->col_matcher);
+ }
+}
+
+static int hws_matcher_init(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ int ret;
+
+ INIT_LIST_HEAD(&matcher->resize_data);
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Allocate matcher resource and connect to the packet pipe */
+ ret = hws_matcher_create_and_connect(matcher);
+ if (ret)
+ goto unlock_err;
+
+ /* Create additional matcher for collision handling */
+ ret = hws_matcher_create_col_matcher(matcher);
+ if (ret)
+ goto destroy_and_disconnect;
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+destroy_and_disconnect:
+ hws_matcher_destroy_and_disconnect(matcher);
+unlock_err:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static int hws_matcher_uninit(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+
+ mutex_lock(&ctx->ctrl_lock);
+ hws_matcher_destroy_col_matcher(matcher);
+ hws_matcher_destroy_and_disconnect(matcher);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+}
+
+int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_action_template *at)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u32 required_stes;
+ int ret;
+
+ if (!matcher->attr.max_num_of_at_attach) {
+ mlx5hws_dbg(ctx, "Num of current at (%d) exceed allowed value\n",
+ matcher->num_of_at);
+ return -EOPNOTSUPP;
+ }
+
+ ret = hws_matcher_check_and_process_at(matcher, at);
+ if (ret)
+ return ret;
+
+ required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
+ if (matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes < required_stes) {
+ mlx5hws_dbg(ctx, "Required STEs [%d] exceeds initial action template STE [%d]\n",
+ required_stes,
+ matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes);
+ return -ENOMEM;
+ }
+
+ matcher->at[matcher->num_of_at] = *at;
+ matcher->num_of_at += 1;
+ matcher->attr.max_num_of_at_attach -= 1;
+
+ if (matcher->col_matcher)
+ matcher->col_matcher->num_of_at = matcher->num_of_at;
+
+ return 0;
+}
+
+static int
+hws_matcher_set_templates(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_match_template *mt[],
+ u8 num_of_mt,
+ struct mlx5hws_action_template *at[],
+ u8 num_of_at)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ int ret = 0;
+ int i;
+
+ if (!num_of_mt || !num_of_at) {
+ mlx5hws_err(ctx, "Number of action/match template cannot be zero\n");
+ return -EOPNOTSUPP;
+ }
+
+ matcher->mt = kcalloc(num_of_mt, sizeof(*matcher->mt), GFP_KERNEL);
+ if (!matcher->mt)
+ return -ENOMEM;
+
+ matcher->at = kcalloc(num_of_at + matcher->attr.max_num_of_at_attach,
+ sizeof(*matcher->at),
+ GFP_KERNEL);
+ if (!matcher->at) {
+ mlx5hws_err(ctx, "Failed to allocate action template array\n");
+ ret = -ENOMEM;
+ goto free_mt;
+ }
+
+ for (i = 0; i < num_of_mt; i++)
+ matcher->mt[i] = *mt[i];
+
+ for (i = 0; i < num_of_at; i++)
+ matcher->at[i] = *at[i];
+
+ matcher->num_of_mt = num_of_mt;
+ matcher->num_of_at = num_of_at;
+
+ return 0;
+
+free_mt:
+ kfree(matcher->mt);
+ return ret;
+}
+
+static void
+hws_matcher_unset_templates(struct mlx5hws_matcher *matcher)
+{
+ kfree(matcher->at);
+ kfree(matcher->mt);
+}
+
+struct mlx5hws_matcher *
+mlx5hws_matcher_create(struct mlx5hws_table *tbl,
+ struct mlx5hws_match_template *mt[],
+ u8 num_of_mt,
+ struct mlx5hws_action_template *at[],
+ u8 num_of_at,
+ struct mlx5hws_matcher_attr *attr)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_matcher *matcher;
+ int ret;
+
+ matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
+ if (!matcher)
+ return NULL;
+
+ matcher->tbl = tbl;
+ matcher->attr = *attr;
+
+ ret = hws_matcher_process_attr(tbl->ctx->caps, matcher);
+ if (ret)
+ goto free_matcher;
+
+ ret = hws_matcher_set_templates(matcher, mt, num_of_mt, at, num_of_at);
+ if (ret)
+ goto free_matcher;
+
+ ret = hws_matcher_init(matcher);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to initialise matcher: %d\n", ret);
+ goto unset_templates;
+ }
+
+ return matcher;
+
+unset_templates:
+ hws_matcher_unset_templates(matcher);
+free_matcher:
+ kfree(matcher);
+ return NULL;
+}
+
+int mlx5hws_matcher_destroy(struct mlx5hws_matcher *matcher)
+{
+ hws_matcher_uninit(matcher);
+ hws_matcher_unset_templates(matcher);
+ kfree(matcher);
+ return 0;
+}
+
+struct mlx5hws_match_template *
+mlx5hws_match_template_create(struct mlx5hws_context *ctx,
+ u32 *match_param,
+ u32 match_param_sz,
+ u8 match_criteria_enable)
+{
+ struct mlx5hws_match_template *mt;
+
+ mt = kzalloc(sizeof(*mt), GFP_KERNEL);
+ if (!mt)
+ return NULL;
+
+ mt->match_param = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!mt->match_param)
+ goto free_template;
+
+ memcpy(mt->match_param, match_param, match_param_sz);
+ mt->match_criteria_enable = match_criteria_enable;
+
+ return mt;
+
+free_template:
+ kfree(mt);
+ return NULL;
+}
+
+int mlx5hws_match_template_destroy(struct mlx5hws_match_template *mt)
+{
+ kfree(mt->match_param);
+ kfree(mt);
+ return 0;
+}
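A hypothetical caller sketch (ctx, the criteria flag, and the TCP match are illustrative, and error handling is elided); since the template copies match_param, the caller's buffer may live on the stack:

	u32 match_param[MLX5_ST_SZ_DW(fte_match_param)] = {};
	struct mlx5hws_match_template *mt;

	/* Match on outer IP protocol == TCP */
	MLX5_SET(fte_match_param, match_param, outer_headers.ip_protocol, IPPROTO_TCP);

	mt = mlx5hws_match_template_create(ctx, match_param, sizeof(match_param),
					   match_criteria_enable); /* e.g. an outer-headers criteria bit */
	if (!mt)
		return -ENOMEM;
	/* ... pass mt to mlx5hws_matcher_create() ... */
	mlx5hws_match_template_destroy(mt);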
+
+static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_matcher *dst_matcher)
+{
+ struct mlx5hws_context *ctx = src_matcher->tbl->ctx;
+ int i;
+
+ if (src_matcher->tbl->type != dst_matcher->tbl->type) {
+ mlx5hws_err(ctx, "Table type mismatch for src/dst matchers\n");
+ return -EINVAL;
+ }
+
+ if (!mlx5hws_matcher_is_resizable(src_matcher) ||
+ !mlx5hws_matcher_is_resizable(dst_matcher)) {
+ mlx5hws_err(ctx, "Src/dst matcher is not resizable\n");
+ return -EINVAL;
+ }
+
+ if (mlx5hws_matcher_is_insert_by_idx(src_matcher) !=
+ mlx5hws_matcher_is_insert_by_idx(dst_matcher)) {
+ mlx5hws_err(ctx, "Src/dst matchers insert mode mismatch\n");
+ return -EINVAL;
+ }
+
+ if (mlx5hws_matcher_is_in_resize(src_matcher) ||
+ mlx5hws_matcher_is_in_resize(dst_matcher)) {
+ mlx5hws_err(ctx, "Src/dst matcher is already in resize\n");
+ return -EINVAL;
+ }
+
+ /* Compare match templates - make sure the definers are equivalent */
+ if (src_matcher->num_of_mt != dst_matcher->num_of_mt) {
+ mlx5hws_err(ctx, "Src/dst matcher match templates mismatch\n");
+ return -EINVAL;
+ }
+
+ if (src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes >
+ dst_matcher->action_ste[0].max_stes) {
+ mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < src_matcher->num_of_mt; i++) {
+ if (mlx5hws_definer_compare(src_matcher->mt[i].definer,
+ dst_matcher->mt[i].definer)) {
+ mlx5hws_err(ctx, "Src/dst matcher definers mismatch\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_matcher *dst_matcher)
+{
+ int ret = 0;
+
+ mutex_lock(&src_matcher->tbl->ctx->ctrl_lock);
+
+ ret = hws_matcher_resize_precheck(src_matcher, dst_matcher);
+ if (ret)
+ goto out;
+
+ src_matcher->resize_dst = dst_matcher;
+
+ ret = hws_matcher_resize_init(src_matcher);
+ if (ret)
+ src_matcher->resize_dst = NULL;
+
+out:
+ mutex_unlock(&src_matcher->tbl->ctx->ctrl_lock);
+ return ret;
+}
+
+int mlx5hws_matcher_resize_rule_move(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_context *ctx = src_matcher->tbl->ctx;
+
+ if (unlikely(!mlx5hws_matcher_is_in_resize(src_matcher))) {
+ mlx5hws_err(ctx, "Matcher is not resizable or not in resize\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(src_matcher != rule->matcher)) {
+ mlx5hws_err(ctx, "Rule doesn't belong to src matcher\n");
+ return -EINVAL;
+ }
+
+ return mlx5hws_rule_move_hws_add(rule, attr);
+}
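Putting the resize API together, a hypothetical grow flow (the doubled row log and the per-rule step are illustrative; both matchers must be created with attr.resizable set so they carry MLX5HWS_MATCHER_FLAGS_RESIZABLE):

	struct mlx5hws_matcher_attr attr = src_matcher->attr;
	struct mlx5hws_matcher *dst_matcher;
	int ret;

	attr.table.sz_row_log += 1; /* double the number of rows */
	dst_matcher = mlx5hws_matcher_create(src_matcher->tbl, mt, num_of_mt,
					     at, num_of_at, &attr);
	if (!dst_matcher)
		return -ENOMEM;

	ret = mlx5hws_matcher_resize_set_target(src_matcher, dst_matcher);
	if (ret)
		return ret;

	/* for each rule previously inserted through src_matcher: */
	ret = mlx5hws_matcher_resize_rule_move(src_matcher, rule, &rule_attr);

	mlx5hws_matcher_destroy(src_matcher);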
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h
new file mode 100644
index 000000000000..125391d1a114
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_MATCHER_H_
+#define MLX5HWS_MATCHER_H_
+
+/* We calculated that concatenating a collision table to the main table with
+ * 3% of the main table rows will be enough resources for high insertion
+ * success probability.
+ *
+ * The calculation: log2(2^x * 3 / 100) = log2(2^x) + log2(3/100) = x - 5.05 ~ 5
+ */
+#define MLX5HWS_MATCHER_ASSURED_ROW_RATIO 5
+/* Threshold to determine if amount of rules require a collision table */
+#define MLX5HWS_MATCHER_ASSURED_RULES_TH 10
+/* Required depth of an assured collision table */
+#define MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH 4
+/* Required depth of the main large table */
+#define MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH 2
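For example, with x = 20 (a 1M-row main table) the exact value is log2(2^20 * 3/100) ≈ 14.94, so the collision table gets 2^(20-5) = 32K rows, about 3.1% of the main table; hws_matcher_create_col_matcher() applies exactly this subtraction to sz_row_log. A one-liner to check the constant:

#include <stdio.h>
#include <math.h>

int main(void)
{
	/* log2(3/100) = -5.058..., hence the row-ratio constant of 5 */
	printf("log2(3/100) = %f\n", log2(3.0 / 100.0));
	return 0;
}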
+
+enum mlx5hws_matcher_offset {
+ MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12,
+ MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13,
+};
+
+enum mlx5hws_matcher_flags {
+ MLX5HWS_MATCHER_FLAGS_COLLISION = 1 << 2,
+ MLX5HWS_MATCHER_FLAGS_RESIZABLE = 1 << 3,
+};
+
+struct mlx5hws_match_template {
+ struct mlx5hws_definer *definer;
+ struct mlx5hws_definer_fc *fc;
+ u32 *match_param;
+ u8 match_criteria_enable;
+ u16 fc_sz;
+};
+
+struct mlx5hws_matcher_match_ste {
+ struct mlx5hws_pool_chunk ste;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ struct mlx5hws_pool *pool;
+};
+
+struct mlx5hws_matcher_action_ste {
+ struct mlx5hws_pool_chunk ste;
+ struct mlx5hws_pool_chunk stc;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ struct mlx5hws_pool *pool;
+ u8 max_stes;
+};
+
+struct mlx5hws_matcher_resize_data_node {
+ struct mlx5hws_pool_chunk stc;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ struct mlx5hws_pool *pool;
+};
+
+struct mlx5hws_matcher_resize_data {
+ struct mlx5hws_matcher_resize_data_node action_ste[2];
+ u8 max_stes;
+ struct list_head list_node;
+};
+
+struct mlx5hws_matcher {
+ struct mlx5hws_table *tbl;
+ struct mlx5hws_matcher_attr attr;
+ struct mlx5hws_match_template *mt;
+ struct mlx5hws_action_template *at;
+ u8 num_of_at;
+ u8 num_of_mt;
+ /* enum mlx5hws_matcher_flags */
+ u8 flags;
+ u32 end_ft_id;
+ struct mlx5hws_matcher *col_matcher;
+ struct mlx5hws_matcher *resize_dst;
+ struct mlx5hws_matcher_match_ste match_ste;
+ struct mlx5hws_matcher_action_ste action_ste[2];
+ struct list_head list_node;
+ struct list_head resize_data;
+};
+
+static inline bool
+mlx5hws_matcher_mt_is_jumbo(struct mlx5hws_match_template *mt)
+{
+ return mlx5hws_definer_is_jumbo(mt->definer);
+}
+
+static inline bool mlx5hws_matcher_is_resizable(struct mlx5hws_matcher *matcher)
+{
+ return !!(matcher->flags & MLX5HWS_MATCHER_FLAGS_RESIZABLE);
+}
+
+static inline bool mlx5hws_matcher_is_in_resize(struct mlx5hws_matcher *matcher)
+{
+ return !!matcher->resize_dst;
+}
+
+static inline bool mlx5hws_matcher_is_insert_by_idx(struct mlx5hws_matcher *matcher)
+{
+ return matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX;
+}
+
+#endif /* MLX5HWS_MATCHER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c
new file mode 100644
index 000000000000..e084a5cbf81f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_data_size_to_arg_log_size(u16 data_size)
+{
+ /* Return the roundup of log2(data_size) */
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE)
+ return MLX5HWS_ARG_CHUNK_SIZE_1;
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE * 2)
+ return MLX5HWS_ARG_CHUNK_SIZE_2;
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE * 4)
+ return MLX5HWS_ARG_CHUNK_SIZE_3;
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE * 8)
+ return MLX5HWS_ARG_CHUNK_SIZE_4;
+
+ return MLX5HWS_ARG_CHUNK_SIZE_MAX;
+}
+
+u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size)
+{
+ return BIT(mlx5hws_arg_data_size_to_arg_log_size(data_size));
+}
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_get_arg_log_size(u16 num_of_actions)
+{
+ return mlx5hws_arg_data_size_to_arg_log_size(num_of_actions *
+ MLX5HWS_MODIFY_ACTION_SIZE);
+}
+
+u32 mlx5hws_arg_get_arg_size(u16 num_of_actions)
+{
+ return BIT(mlx5hws_arg_get_arg_log_size(num_of_actions));
+}
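Assuming MLX5HWS_ARG_DATA_SIZE is the 64-byte WQE data unit that mlx5hws_arg_write() below works in, these helpers round a byte count up to a power-of-two number of chunks. A user-space sketch of the same roundup:

#include <assert.h>

#define ARG_DATA_SIZE 64 /* assumed value of MLX5HWS_ARG_DATA_SIZE */

/* Round data_size up to a power-of-two number of 64B chunks */
static unsigned int arg_num_chunks(unsigned int data_size)
{
	unsigned int n = (data_size + ARG_DATA_SIZE - 1) / ARG_DATA_SIZE;
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	assert(arg_num_chunks(64) == 1);  /* MLX5HWS_ARG_CHUNK_SIZE_1 */
	assert(arg_num_chunks(70) == 2);  /* MLX5HWS_ARG_CHUNK_SIZE_2 */
	assert(arg_num_chunks(200) == 4); /* MLX5HWS_ARG_CHUNK_SIZE_3 */
	return 0;
}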
+
+bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions)
+{
+ u16 i, field;
+ u8 action_id;
+
+ for (i = 0; i < num_of_actions; i++) {
+ action_id = MLX5_GET(set_action_in, &actions[i], action_type);
+
+ switch (action_id) {
+ case MLX5_MODIFICATION_TYPE_NOP:
+ field = MLX5_MODI_OUT_NONE;
+ break;
+
+ case MLX5_MODIFICATION_TYPE_SET:
+ case MLX5_MODIFICATION_TYPE_ADD:
+ field = MLX5_GET(set_action_in, &actions[i], field);
+ break;
+
+ case MLX5_MODIFICATION_TYPE_COPY:
+ case MLX5_MODIFICATION_TYPE_ADD_FIELD:
+ field = MLX5_GET(copy_action_in, &actions[i], dst_field);
+ break;
+
+ default:
+ /* Insert/Remove/Unknown actions require reparse */
+ return true;
+ }
+
+ /* The fields below can change the packet structure and require a reparse */
+ if (field == MLX5_MODI_OUT_ETHERTYPE ||
+ field == MLX5_MODI_OUT_IPV6_NEXT_HDR)
+ return true;
+ }
+
+ return false;
+}
+
+/* Cache and cache element handling */
+int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache)
+{
+ struct mlx5hws_pattern_cache *new_cache;
+
+ new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
+ if (!new_cache)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&new_cache->ptrn_list);
+ mutex_init(&new_cache->lock);
+
+ *cache = new_cache;
+
+ return 0;
+}
+
+void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache)
+{
+ mutex_destroy(&cache->lock);
+ kfree(cache);
+}
+
+static bool mlx5hws_pat_compare_pattern(int cur_num_of_actions,
+ __be64 cur_actions[],
+ int num_of_actions,
+ __be64 actions[])
+{
+ int i;
+
+ if (cur_num_of_actions != num_of_actions)
+ return false;
+
+ for (i = 0; i < num_of_actions; i++) {
+ u8 action_id =
+ MLX5_GET(set_action_in, &actions[i], action_type);
+
+ if (action_id == MLX5_MODIFICATION_TYPE_COPY ||
+ action_id == MLX5_MODIFICATION_TYPE_ADD_FIELD) {
+ if (actions[i] != cur_actions[i])
+ return false;
+ } else {
+ /* Compare just the control, not the values */
+ if ((__force __be32)actions[i] !=
+ (__force __be32)cur_actions[i])
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_find_cached_pattern(struct mlx5hws_pattern_cache *cache,
+ u16 num_of_actions,
+ __be64 *actions)
+{
+ struct mlx5hws_pattern_cache_item *cached_pat = NULL;
+
+ list_for_each_entry(cached_pat, &cache->ptrn_list, ptrn_list_node) {
+ if (mlx5hws_pat_compare_pattern(cached_pat->mh_data.num_of_actions,
+ (__be64 *)cached_pat->mh_data.data,
+ num_of_actions,
+ actions))
+ return cached_pat;
+ }
+
+ return NULL;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache,
+ u16 num_of_actions,
+ __be64 *actions)
+{
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+
+ cached_pattern = mlx5hws_pat_find_cached_pattern(cache, num_of_actions, actions);
+ if (cached_pattern) {
+ /* LRU: move it to be first in the list */
+ list_del_init(&cached_pattern->ptrn_list_node);
+ list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
+ cached_pattern->refcount++;
+ }
+
+ return cached_pattern;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_add_pattern_to_cache(struct mlx5hws_pattern_cache *cache,
+ u32 pattern_id,
+ u16 num_of_actions,
+ __be64 *actions)
+{
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+
+ cached_pattern = kzalloc(sizeof(*cached_pattern), GFP_KERNEL);
+ if (!cached_pattern)
+ return NULL;
+
+ cached_pattern->mh_data.num_of_actions = num_of_actions;
+ cached_pattern->mh_data.pattern_id = pattern_id;
+ cached_pattern->mh_data.data =
+ kmemdup(actions, num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
+ if (!cached_pattern->mh_data.data)
+ goto free_cached_obj;
+
+ list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
+ cached_pattern->refcount = 1;
+
+ return cached_pattern;
+
+free_cached_obj:
+ kfree(cached_pattern);
+ return NULL;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_find_cached_pattern_by_id(struct mlx5hws_pattern_cache *cache,
+ u32 ptrn_id)
+{
+ struct mlx5hws_pattern_cache_item *cached_pattern = NULL;
+
+ list_for_each_entry(cached_pattern, &cache->ptrn_list, ptrn_list_node) {
+ if (cached_pattern->mh_data.pattern_id == ptrn_id)
+ return cached_pattern;
+ }
+
+ return NULL;
+}
+
+static void
+mlx5hws_pat_remove_pattern(struct mlx5hws_pattern_cache_item *cached_pattern)
+{
+ list_del_init(&cached_pattern->ptrn_list_node);
+
+ kfree(cached_pattern->mh_data.data);
+ kfree(cached_pattern);
+}
+
+void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx, u32 ptrn_id)
+{
+ struct mlx5hws_pattern_cache *cache = ctx->pattern_cache;
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+
+ mutex_lock(&cache->lock);
+ cached_pattern = mlx5hws_pat_find_cached_pattern_by_id(cache, ptrn_id);
+ if (!cached_pattern) {
+ mlx5hws_err(ctx, "Failed to find cached pattern with provided ID\n");
+ pr_warn("HWS: pattern ID %d is not found\n", ptrn_id);
+ goto out;
+ }
+
+ if (--cached_pattern->refcount)
+ goto out;
+
+ mlx5hws_pat_remove_pattern(cached_pattern);
+ mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
+
+out:
+ mutex_unlock(&cache->lock);
+}
+
+int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
+ __be64 *pattern, size_t pattern_sz,
+ u32 *pattern_id)
+{
+ u16 num_of_actions = pattern_sz / MLX5HWS_MODIFY_ACTION_SIZE;
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+ u32 ptrn_id = 0;
+ int ret = 0;
+
+ mutex_lock(&ctx->pattern_cache->lock);
+
+ cached_pattern = mlx5hws_pat_get_existing_cached_pattern(ctx->pattern_cache,
+ num_of_actions,
+ pattern);
+ if (cached_pattern) {
+ *pattern_id = cached_pattern->mh_data.pattern_id;
+ goto out_unlock;
+ }
+
+ ret = mlx5hws_cmd_header_modify_pattern_create(ctx->mdev,
+ pattern_sz,
+ (u8 *)pattern,
+ &ptrn_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create pattern FW object\n");
+ goto out_unlock;
+ }
+
+ cached_pattern = mlx5hws_pat_add_pattern_to_cache(ctx->pattern_cache,
+ ptrn_id,
+ num_of_actions,
+ pattern);
+ if (!cached_pattern) {
+ mlx5hws_err(ctx, "Failed to add pattern to cache\n");
+ ret = -EINVAL;
+ goto clean_pattern;
+ }
+
+ mutex_unlock(&ctx->pattern_cache->lock);
+ *pattern_id = ptrn_id;
+
+ return ret;
+
+clean_pattern:
+	mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
+out_unlock:
+ mutex_unlock(&ctx->pattern_cache->lock);
+ return ret;
+}
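+
+/* Usage sketch (illustrative, not a fixed API contract): callers obtain a
+ * pattern ID via mlx5hws_pat_get_pattern(), which either takes a reference
+ * on a cached FW pattern or creates a new FW object, and must balance it
+ * with mlx5hws_pat_put_pattern(), which destroys the FW object once the
+ * refcount drops to zero.
+ */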
+
+static void
+mlx5d_arg_init_send_attr(struct mlx5hws_send_engine_post_attr *send_attr,
+ void *comp_data,
+ u32 arg_idx)
+{
+ send_attr->opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ send_attr->opmod = MLX5HWS_WQE_GTA_OPMOD_MOD_ARG;
+ send_attr->len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ send_attr->id = arg_idx;
+ send_attr->user_data = comp_data;
+}
+
+void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
+ u32 arg_idx,
+ u8 *arg_data,
+ u16 num_of_actions)
+{
+ struct mlx5hws_send_engine_post_attr send_attr = {0};
+ struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg = NULL;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl = NULL;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ size_t wqe_len;
+
+ mlx5d_arg_init_send_attr(&send_attr, NULL, arg_idx);
+
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ memset(wqe_ctrl, 0, wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+ mlx5hws_action_prepare_decap_l3_data(arg_data, (u8 *)wqe_arg,
+ num_of_actions);
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+}
+
+void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
+ void *comp_data,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size)
+{
+ struct mlx5hws_send_engine_post_attr send_attr = {0};
+ struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ int i, full_iter, leftover;
+ size_t wqe_len;
+
+ mlx5d_arg_init_send_attr(&send_attr, comp_data, arg_idx);
+
+	/* Each WQE can hold 64B of data; a larger write may need multiple WQEs */
+ full_iter = data_size / MLX5HWS_ARG_DATA_SIZE;
+ leftover = data_size & (MLX5HWS_ARG_DATA_SIZE - 1);
+
+ for (i = 0; i < full_iter; i++) {
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ memset(wqe_ctrl, 0, wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+ memcpy(wqe_arg, arg_data, wqe_len);
+ send_attr.id = arg_idx++;
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+
+ /* Move to next argument data */
+ arg_data += MLX5HWS_ARG_DATA_SIZE;
+ }
+
+ if (leftover) {
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ memset(wqe_ctrl, 0, wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+ memcpy(wqe_arg, arg_data, leftover);
+ send_attr.id = arg_idx;
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+ }
+}
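+
+/* For example, a 100B write above is split into one full 64B WQE at arg_idx
+ * and a 36B leftover WQE at arg_idx + 1; each consecutive argument index is
+ * assumed to cover the next MLX5HWS_ARG_DATA_SIZE bytes of argument memory.
+ */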
+
+int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size)
+{
+ struct mlx5hws_send_engine *queue;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Get the control queue */
+ queue = &ctx->send_queue[ctx->queues - 1];
+
+ mlx5hws_arg_write(queue, arg_data, arg_idx, arg_data, data_size);
+
+ mlx5hws_send_engine_flush_queue(queue);
+
+ /* Poll for completion */
+ ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1,
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+
+ if (ret)
+ mlx5hws_err(ctx, "Failed to drain arg queue\n");
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return ret;
+}
+
+bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
+					   u32 arg_size)
+{
+	return arg_size >= ctx->caps->log_header_modify_argument_granularity &&
+	       arg_size <= ctx->caps->log_header_modify_argument_max_alloc;
+}
+
+int mlx5hws_arg_create(struct mlx5hws_context *ctx,
+ u8 *data,
+ size_t data_sz,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *arg_id)
+{
+ u16 single_arg_log_sz;
+ u16 multi_arg_log_sz;
+ int ret;
+ u32 id;
+
+ single_arg_log_sz = mlx5hws_arg_data_size_to_arg_log_size(data_sz);
+ multi_arg_log_sz = single_arg_log_sz + log_bulk_sz;
+
+ if (single_arg_log_sz >= MLX5HWS_ARG_CHUNK_SIZE_MAX) {
+ mlx5hws_err(ctx, "Requested single arg %u not supported\n", single_arg_log_sz);
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5hws_arg_is_valid_arg_request_size(ctx, multi_arg_log_sz)) {
+ mlx5hws_err(ctx, "Argument log size %d not supported by FW\n", multi_arg_log_sz);
+ return -EOPNOTSUPP;
+ }
+
+ /* Alloc bulk of args */
+ ret = mlx5hws_cmd_arg_create(ctx->mdev, multi_arg_log_sz, ctx->pd_num, &id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed allocating arg in order: %d\n", multi_arg_log_sz);
+ return ret;
+ }
+
+ if (write_data) {
+ ret = mlx5hws_arg_write_inline_arg_data(ctx, id,
+ data, data_sz);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed writing arg data\n");
+ mlx5hws_cmd_arg_destroy(ctx->mdev, id);
+ return ret;
+ }
+ }
+
+ *arg_id = id;
+ return ret;
+}
+
+void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id)
+{
+ mlx5hws_cmd_arg_destroy(ctx->mdev, arg_id);
+}
+
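+/* Sizing note (our reading of the code, not a spec quote): each modify
+ * action takes MLX5HWS_MODIFY_ACTION_SIZE (8B), so e.g. 3 actions need a
+ * 24B argument, which is rounded up to a supported arg chunk size, and
+ * log_bulk_sz then scales the allocation for bulk rule insertion.
+ */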
+int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
+ __be64 *data,
+ u8 num_of_actions,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *arg_id)
+{
+ size_t data_sz = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
+ int ret;
+
+ ret = mlx5hws_arg_create(ctx,
+ (u8 *)data,
+ data_sz,
+ log_bulk_sz,
+ write_data,
+ arg_id);
+ if (ret)
+ mlx5hws_err(ctx, "Failed creating modify header arg\n");
+
+ return ret;
+}
+
+static int
+hws_action_modify_check_field_limitation(u8 action_type, __be64 *pattern)
+{
+	/* TODO: check field limitations; for now assume the pattern is valid */
+ return 0;
+}
+
+#define INVALID_FIELD 0xffff
+
+static void
+hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern,
+ u16 *src_field, u16 *dst_field)
+{
+ switch (action_type) {
+ case MLX5_ACTION_TYPE_SET:
+ case MLX5_ACTION_TYPE_ADD:
+ *src_field = MLX5_GET(set_action_in, pattern, field);
+ *dst_field = INVALID_FIELD;
+ break;
+ case MLX5_ACTION_TYPE_COPY:
+ *src_field = MLX5_GET(copy_action_in, pattern, src_field);
+ *dst_field = MLX5_GET(copy_action_in, pattern, dst_field);
+ break;
+	default:
+		*src_field = INVALID_FIELD;
+		*dst_field = INVALID_FIELD;
+		pr_warn("HWS: invalid modify header action type %d\n", action_type);
+ }
+}
+
+bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz)
+{
+ size_t i;
+
+ for (i = 0; i < sz / MLX5HWS_MODIFY_ACTION_SIZE; i++) {
+ u8 action_type =
+ MLX5_GET(set_action_in, &pattern[i], action_type);
+ if (action_type >= MLX5_MODIFICATION_TYPE_MAX) {
+ mlx5hws_err(ctx, "Unsupported action id %d\n", action_type);
+ return false;
+ }
+ if (hws_action_modify_check_field_limitation(action_type, &pattern[i])) {
+ mlx5hws_err(ctx, "Unsupported action number %zu\n", i);
+ return false;
+ }
+ }
+
+ return true;
+}
+
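+/* Presumably a HW pipelining constraint: two consecutive modify actions
+ * touching the same field cannot sit in adjacent slots of an action pair,
+ * so a NOP is padded in between. E.g. [set(fieldA), set(fieldA)] becomes
+ * [set(fieldA), NOP, set(fieldA)] with bit 1 of nope_location set.
+ */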
+void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions,
+ size_t max_actions, size_t *new_size,
+ u32 *nope_location, __be64 *new_pat)
+{
+ u16 prev_src_field = 0, prev_dst_field = 0;
+ u16 src_field, dst_field;
+ u8 action_type;
+ size_t i, j;
+
+ *new_size = num_actions;
+ *nope_location = 0;
+
+ if (num_actions == 1)
+ return;
+
+ for (i = 0, j = 0; i < num_actions; i++, j++) {
+ action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
+
+ hws_action_modify_get_target_fields(action_type, &pattern[i],
+ &src_field, &dst_field);
+ if (i % 2) {
+ if (action_type == MLX5_ACTION_TYPE_COPY &&
+ (prev_src_field == src_field ||
+ prev_dst_field == dst_field)) {
+				/* Insert a NOP */
+ *new_size += 1;
+ *nope_location |= BIT(i);
+ memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
+ MLX5_SET(set_action_in, &new_pat[j],
+ action_type,
+ MLX5_MODIFICATION_TYPE_NOP);
+ j++;
+ } else if (prev_src_field == src_field) {
+				/* Insert a NOP */
+				*new_size += 1;
+				*nope_location |= BIT(i);
+				memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
+				MLX5_SET(set_action_in, &new_pat[j],
+					 action_type,
+					 MLX5_MODIFICATION_TYPE_NOP);
+ j++;
+ }
+ }
+		/* Bail out if the expanded pattern no longer fits */
+		if (j >= max_actions) {
+			*new_size = num_actions;
+			*nope_location = 0;
+			return;
+		}
+		memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
+
+ prev_src_field = src_field;
+ prev_dst_field = dst_field;
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h
new file mode 100644
index 000000000000..27ca93385b08
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_PAT_ARG_H_
+#define MLX5HWS_PAT_ARG_H_
+
+/* Modify-header arg pool */
+enum mlx5hws_arg_chunk_size {
+ MLX5HWS_ARG_CHUNK_SIZE_1,
+ /* Keep MIN updated when changing */
+ MLX5HWS_ARG_CHUNK_SIZE_MIN = MLX5HWS_ARG_CHUNK_SIZE_1,
+ MLX5HWS_ARG_CHUNK_SIZE_2,
+ MLX5HWS_ARG_CHUNK_SIZE_3,
+ MLX5HWS_ARG_CHUNK_SIZE_4,
+ MLX5HWS_ARG_CHUNK_SIZE_MAX,
+};
+
+enum {
+ MLX5HWS_MODIFY_ACTION_SIZE = 8,
+ MLX5HWS_ARG_DATA_SIZE = 64,
+};
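+
+/* Derived sizing: one arg WQE carries MLX5HWS_ARG_DATA_SIZE (64B) of data,
+ * i.e. up to 8 modify-header actions of MLX5HWS_MODIFY_ACTION_SIZE (8B) each.
+ */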
+
+struct mlx5hws_pattern_cache {
+ struct mutex lock; /* Protect pattern list */
+ struct list_head ptrn_list;
+};
+
+struct mlx5hws_pattern_cache_item {
+ struct {
+ u32 pattern_id;
+ u8 *data;
+ u16 num_of_actions;
+ } mh_data;
+ u32 refcount;
+ struct list_head ptrn_list_node;
+};
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_get_arg_log_size(u16 num_of_actions);
+
+u32 mlx5hws_arg_get_arg_size(u16 num_of_actions);
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_data_size_to_arg_log_size(u16 data_size);
+
+u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size);
+
+int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache);
+
+void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache);
+
+bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz);
+
+int mlx5hws_arg_create(struct mlx5hws_context *ctx,
+ u8 *data,
+ size_t data_sz,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *arg_id);
+
+void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id);
+
+int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
+ __be64 *data,
+ u8 num_of_actions,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *modify_hdr_arg_id);
+
+int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
+ __be64 *pattern,
+ size_t pattern_sz,
+ u32 *ptrn_id);
+
+void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx,
+ u32 ptrn_id);
+
+bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
+ u32 arg_size);
+
+bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions);
+
+void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
+ void *comp_data,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size);
+
+void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
+ u32 arg_idx,
+ u8 *arg_data,
+ u16 num_of_actions);
+
+int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size);
+
+void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, size_t max_actions,
+ size_t *new_size, u32 *nope_location, __be64 *new_pat);
+#endif /* MLX5HWS_PAT_ARG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c
new file mode 100644
index 000000000000..a8a63e3278be
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c
@@ -0,0 +1,640 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+#include "mlx5hws_buddy.h"
+
+static void hws_pool_free_one_resource(struct mlx5hws_pool_resource *resource)
+{
+ switch (resource->pool->type) {
+ case MLX5HWS_POOL_TYPE_STE:
+ mlx5hws_cmd_ste_destroy(resource->pool->ctx->mdev, resource->base_id);
+ break;
+ case MLX5HWS_POOL_TYPE_STC:
+ mlx5hws_cmd_stc_destroy(resource->pool->ctx->mdev, resource->base_id);
+ break;
+ default:
+ break;
+ }
+
+ kfree(resource);
+}
+
+static void hws_pool_resource_free(struct mlx5hws_pool *pool,
+ int resource_idx)
+{
+ hws_pool_free_one_resource(pool->resource[resource_idx]);
+ pool->resource[resource_idx] = NULL;
+
+ if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
+ hws_pool_free_one_resource(pool->mirror_resource[resource_idx]);
+ pool->mirror_resource[resource_idx] = NULL;
+ }
+}
+
+static struct mlx5hws_pool_resource *
+hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range,
+ u32 fw_ft_type)
+{
+ struct mlx5hws_cmd_ste_create_attr ste_attr;
+ struct mlx5hws_cmd_stc_create_attr stc_attr;
+ struct mlx5hws_pool_resource *resource;
+ u32 obj_id = 0;
+ int ret;
+
+ resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
+ return NULL;
+
+ switch (pool->type) {
+ case MLX5HWS_POOL_TYPE_STE:
+ ste_attr.log_obj_range = log_range;
+ ste_attr.table_type = fw_ft_type;
+ ret = mlx5hws_cmd_ste_create(pool->ctx->mdev, &ste_attr, &obj_id);
+ break;
+ case MLX5HWS_POOL_TYPE_STC:
+ stc_attr.log_obj_range = log_range;
+ stc_attr.table_type = fw_ft_type;
+ ret = mlx5hws_cmd_stc_create(pool->ctx->mdev, &stc_attr, &obj_id);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ mlx5hws_err(pool->ctx, "Failed to allocate resource objects\n");
+ goto free_resource;
+ }
+
+ resource->pool = pool;
+ resource->range = 1 << log_range;
+ resource->base_id = obj_id;
+
+ return resource;
+
+free_resource:
+ kfree(resource);
+ return NULL;
+}
+
+static int
+hws_pool_resource_alloc(struct mlx5hws_pool *pool, u32 log_range, int idx)
+{
+ struct mlx5hws_pool_resource *resource;
+ u32 fw_ft_type, opt_log_range;
+
+ fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, false);
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_range;
+ resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+ if (!resource) {
+ mlx5hws_err(pool->ctx, "Failed allocating resource\n");
+ return -EINVAL;
+ }
+
+ pool->resource[idx] = resource;
+
+ if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
+ struct mlx5hws_pool_resource *mirror_resource;
+
+ fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, true);
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_range;
+ mirror_resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+ if (!mirror_resource) {
+ mlx5hws_err(pool->ctx, "Failed allocating mirrored resource\n");
+ hws_pool_free_one_resource(resource);
+ pool->resource[idx] = NULL;
+ return -EINVAL;
+ }
+ pool->mirror_resource[idx] = mirror_resource;
+ }
+
+ return 0;
+}
+
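+/* Bitmap convention used by this pool: a set bit marks a free slot, so a
+ * fresh bitmap starts fully set; allocation clears a bit, freeing sets it.
+ */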
+static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range)
+{
+ unsigned long *cur_bmp;
+
+ cur_bmp = bitmap_zalloc(1 << log_range, GFP_KERNEL);
+ if (!cur_bmp)
+ return NULL;
+
+ bitmap_fill(cur_bmp, 1 << log_range);
+
+ return cur_bmp;
+}
+
+static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ struct mlx5hws_buddy_mem *buddy;
+
+ buddy = pool->db.buddy_manager->buddies[chunk->resource_idx];
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "No such buddy (%d)\n", chunk->resource_idx);
+ return;
+ }
+
+ mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);
+}
+
+static struct mlx5hws_buddy_mem *
+hws_pool_buddy_get_next_buddy(struct mlx5hws_pool *pool, int idx,
+ u32 order, bool *is_new_buddy)
+{
+	struct mlx5hws_buddy_mem *buddy;
+ u32 new_buddy_size;
+
+ buddy = pool->db.buddy_manager->buddies[idx];
+ if (buddy)
+ return buddy;
+
+ new_buddy_size = max(pool->alloc_log_sz, order);
+ *is_new_buddy = true;
+ buddy = mlx5hws_buddy_create(new_buddy_size);
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "Failed to create buddy order: %d index: %d\n",
+ new_buddy_size, idx);
+ return NULL;
+ }
+
+ if (hws_pool_resource_alloc(pool, new_buddy_size, idx) != 0) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+ pool->type, new_buddy_size, idx);
+		mlx5hws_buddy_cleanup(buddy);
+		kfree(buddy);
+ return NULL;
+ }
+
+ pool->db.buddy_manager->buddies[idx] = buddy;
+
+ return buddy;
+}
+
+static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool,
+ int order,
+ u32 *buddy_idx,
+ int *seg)
+{
+ struct mlx5hws_buddy_mem *buddy;
+ bool new_mem = false;
+ int ret = 0;
+ int i;
+
+ *seg = -1;
+
+	/* Find the next free slot in the buddy array */
+ while (*seg == -1) {
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+ buddy = hws_pool_buddy_get_next_buddy(pool, i,
+ order,
+ &new_mem);
+ if (!buddy) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ *seg = mlx5hws_buddy_alloc_mem(buddy, order);
+ if (*seg != -1)
+ goto found;
+
+ if (pool->flags & MLX5HWS_POOL_FLAGS_ONE_RESOURCE) {
+				mlx5hws_err(pool->ctx,
+					    "Failed to allocate segment for one-resource pool\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (new_mem) {
+				/* A new buddy was just allocated, so there should have been room */
+ mlx5hws_err(pool->ctx,
+ "No memory for order: %d with buddy no: %d\n",
+ order, i);
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ }
+
+found:
+ *buddy_idx = i;
+out:
+ return ret;
+}
+
+static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret = 0;
+
+ /* Go over the buddies and find next free slot */
+ ret = hws_pool_buddy_get_mem_chunk(pool, chunk->order,
+ &chunk->resource_idx,
+ &chunk->offset);
+ if (ret)
+ mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+ chunk->order);
+
+ return ret;
+}
+
+static void hws_pool_buddy_db_uninit(struct mlx5hws_pool *pool)
+{
+ struct mlx5hws_buddy_mem *buddy;
+ int i;
+
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+ buddy = pool->db.buddy_manager->buddies[i];
+ if (buddy) {
+ mlx5hws_buddy_cleanup(buddy);
+ kfree(buddy);
+ pool->db.buddy_manager->buddies[i] = NULL;
+ }
+ }
+
+ kfree(pool->db.buddy_manager);
+}
+
+static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool, u32 log_range)
+{
+ pool->db.buddy_manager = kzalloc(sizeof(*pool->db.buddy_manager), GFP_KERNEL);
+ if (!pool->db.buddy_manager)
+ return -ENOMEM;
+
+ if (pool->flags & MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE) {
+ bool new_buddy;
+
+ if (!hws_pool_buddy_get_next_buddy(pool, 0, log_range, &new_buddy)) {
+ mlx5hws_err(pool->ctx,
+ "Failed allocating memory on create log_sz: %d\n", log_range);
+ kfree(pool->db.buddy_manager);
+ return -ENOMEM;
+ }
+ }
+
+ pool->p_db_uninit = &hws_pool_buddy_db_uninit;
+ pool->p_get_chunk = &hws_pool_buddy_db_get_chunk;
+ pool->p_put_chunk = &hws_pool_buddy_db_put_chunk;
+
+ return 0;
+}
+
+static int hws_pool_create_resource_on_index(struct mlx5hws_pool *pool,
+ u32 alloc_size, int idx)
+{
+ int ret = hws_pool_resource_alloc(pool, alloc_size, idx);
+
+ if (ret) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+ pool->type, alloc_size, idx);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct mlx5hws_pool_elements *
+hws_pool_element_create_new_elem(struct mlx5hws_pool *pool, u32 order, int idx)
+{
+ struct mlx5hws_pool_elements *elem;
+ u32 alloc_size;
+
+ alloc_size = pool->alloc_log_sz;
+
+ elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+ if (!elem)
+ return NULL;
+
+	/* All elements share one resource, which also implies that every element has size 1 */
+	if ((pool->flags & MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS) &&
+	    !(pool->flags & MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK)) {
+		/* Currently all chunks are of size 1 */
+ elem->bitmap = hws_pool_create_and_init_bitmap(alloc_size - order);
+ if (!elem->bitmap) {
+ mlx5hws_err(pool->ctx,
+ "Failed to create bitmap type: %d: size %d index: %d\n",
+ pool->type, alloc_size, idx);
+ goto free_elem;
+ }
+
+ elem->log_size = alloc_size - order;
+ }
+
+ if (hws_pool_create_resource_on_index(pool, alloc_size, idx)) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+ pool->type, alloc_size, idx);
+ goto free_db;
+ }
+
+ pool->db.element_manager->elements[idx] = elem;
+
+ return elem;
+
+free_db:
+ bitmap_free(elem->bitmap);
+free_elem:
+ kfree(elem);
+ return NULL;
+}
+
+static int hws_pool_element_find_seg(struct mlx5hws_pool_elements *elem, int *seg)
+{
+ unsigned int segment, size;
+
+ size = 1 << elem->log_size;
+
+ segment = find_first_bit(elem->bitmap, size);
+ if (segment >= size) {
+ elem->is_full = true;
+ return -ENOMEM;
+ }
+
+ bitmap_clear(elem->bitmap, segment, 1);
+ *seg = segment;
+ return 0;
+}
+
+static int
+hws_pool_onesize_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
+ u32 *idx, int *seg)
+{
+ struct mlx5hws_pool_elements *elem;
+
+ elem = pool->db.element_manager->elements[0];
+ if (!elem)
+ elem = hws_pool_element_create_new_elem(pool, order, 0);
+ if (!elem)
+ goto err_no_elem;
+
+ if (hws_pool_element_find_seg(elem, seg) != 0) {
+ mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
+ return -ENOMEM;
+ }
+
+ *idx = 0;
+ elem->num_of_elements++;
+ return 0;
+
+err_no_elem:
+ mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
+ return -ENOMEM;
+}
+
+static int
+hws_pool_general_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
+ u32 *idx, int *seg)
+{
+ int ret, i;
+
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+ if (!pool->resource[i]) {
+ ret = hws_pool_create_resource_on_index(pool, order, i);
+ if (ret)
+ goto err_no_res;
+ *idx = i;
+ *seg = 0; /* One memory slot in that element */
+ return 0;
+ }
+ }
+
+ mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
+ return -ENOMEM;
+
+err_no_res:
+ mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
+ return -ENOMEM;
+}
+
+static int hws_pool_general_element_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret;
+
+ /* Go over all memory elements and find/allocate free slot */
+ ret = hws_pool_general_element_get_mem_chunk(pool, chunk->order,
+ &chunk->resource_idx,
+ &chunk->offset);
+ if (ret)
+ mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+ chunk->order);
+
+ return ret;
+}
+
+static void hws_pool_general_element_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ if (unlikely(!pool->resource[chunk->resource_idx]))
+ pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+
+ if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE)
+ hws_pool_resource_free(pool, chunk->resource_idx);
+}
+
+static void hws_pool_general_element_db_uninit(struct mlx5hws_pool *pool)
+{
+ (void)pool;
+}
+
+/* This memory management works as follows:
+ * - At start, no memory is allocated at all.
+ * - When a new chunk request arrives,
+ *	a resource is allocated and handed out.
+ * - When that chunk is freed,
+ *	its resource is freed as well.
+ */
+static int hws_pool_general_element_db_init(struct mlx5hws_pool *pool)
+{
+ pool->p_db_uninit = &hws_pool_general_element_db_uninit;
+ pool->p_get_chunk = &hws_pool_general_element_db_get_chunk;
+ pool->p_put_chunk = &hws_pool_general_element_db_put_chunk;
+
+ return 0;
+}
+
+static void hws_onesize_element_db_destroy_element(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_elements *elem,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ if (unlikely(!pool->resource[chunk->resource_idx]))
+ pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+
+ hws_pool_resource_free(pool, chunk->resource_idx);
+ kfree(elem);
+ pool->db.element_manager->elements[chunk->resource_idx] = NULL;
+}
+
+static void hws_onesize_element_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ struct mlx5hws_pool_elements *elem;
+
+ if (unlikely(chunk->resource_idx))
+ pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+
+ elem = pool->db.element_manager->elements[chunk->resource_idx];
+ if (!elem) {
+ mlx5hws_err(pool->ctx, "No such element (%d)\n", chunk->resource_idx);
+ return;
+ }
+
+ bitmap_set(elem->bitmap, chunk->offset, 1);
+ elem->is_full = false;
+ elem->num_of_elements--;
+
+ if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE &&
+ !elem->num_of_elements)
+ hws_onesize_element_db_destroy_element(pool, elem, chunk);
+}
+
+static int hws_onesize_element_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret = 0;
+
+ /* Go over all memory elements and find/allocate free slot */
+ ret = hws_pool_onesize_element_get_mem_chunk(pool, chunk->order,
+ &chunk->resource_idx,
+ &chunk->offset);
+ if (ret)
+ mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+ chunk->order);
+
+ return ret;
+}
+
+static void hws_onesize_element_db_uninit(struct mlx5hws_pool *pool)
+{
+ struct mlx5hws_pool_elements *elem;
+ int i;
+
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+ elem = pool->db.element_manager->elements[i];
+ if (elem) {
+ bitmap_free(elem->bitmap);
+ kfree(elem);
+ pool->db.element_manager->elements[i] = NULL;
+ }
+ }
+ kfree(pool->db.element_manager);
+}
+
+/* This memory management works as follows:
+ * - At start, no memory is allocated at all.
+ * - When a new chunk request arrives,
+ *	the first and only slot of memory/resource is allocated;
+ *	once it is exhausted, an error is returned.
+ */
+static int hws_pool_onesize_element_db_init(struct mlx5hws_pool *pool)
+{
+ pool->db.element_manager = kzalloc(sizeof(*pool->db.element_manager), GFP_KERNEL);
+ if (!pool->db.element_manager)
+ return -ENOMEM;
+
+ pool->p_db_uninit = &hws_onesize_element_db_uninit;
+ pool->p_get_chunk = &hws_onesize_element_db_get_chunk;
+ pool->p_put_chunk = &hws_onesize_element_db_put_chunk;
+
+ return 0;
+}
+
+static int hws_pool_db_init(struct mlx5hws_pool *pool,
+ enum mlx5hws_db_type db_type)
+{
+ int ret;
+
+ if (db_type == MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE)
+ ret = hws_pool_general_element_db_init(pool);
+ else if (db_type == MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE)
+ ret = hws_pool_onesize_element_db_init(pool);
+ else
+ ret = hws_pool_buddy_db_init(pool, pool->alloc_log_sz);
+
+ if (ret) {
+ mlx5hws_err(pool->ctx, "Failed to init general db : %d (ret: %d)\n", db_type, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hws_pool_db_uninit(struct mlx5hws_pool *pool)
+{
+ pool->p_db_uninit(pool);
+}
+
+int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret;
+
+ mutex_lock(&pool->lock);
+ ret = pool->p_get_chunk(pool, chunk);
+ mutex_unlock(&pool->lock);
+
+ return ret;
+}
+
+void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ mutex_lock(&pool->lock);
+ pool->p_put_chunk(pool, chunk);
+ mutex_unlock(&pool->lock);
+}
+
+struct mlx5hws_pool *
+mlx5hws_pool_create(struct mlx5hws_context *ctx, struct mlx5hws_pool_attr *pool_attr)
+{
+ enum mlx5hws_db_type res_db_type;
+ struct mlx5hws_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->ctx = ctx;
+ pool->type = pool_attr->pool_type;
+ pool->alloc_log_sz = pool_attr->alloc_log_sz;
+ pool->flags = pool_attr->flags;
+ pool->tbl_type = pool_attr->table_type;
+ pool->opt_type = pool_attr->opt_type;
+
+	/* Select the db type based on the pool flags */
+ if (pool->flags == (MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK))
+ res_db_type = MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE;
+ else if (pool->flags == (MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS))
+ res_db_type = MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE;
+ else
+ res_db_type = MLX5HWS_POOL_DB_TYPE_BUDDY;
+
+
+ if (hws_pool_db_init(pool, res_db_type))
+ goto free_pool;
+
+ mutex_init(&pool->lock);
+
+ return pool;
+
+free_pool:
+ kfree(pool);
+ return NULL;
+}
+
+int mlx5hws_pool_destroy(struct mlx5hws_pool *pool)
+{
+ int i;
+
+ mutex_destroy(&pool->lock);
+
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++)
+ if (pool->resource[i])
+ hws_pool_resource_free(pool, i);
+
+	hws_pool_db_uninit(pool);
+
+ kfree(pool);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h
new file mode 100644
index 000000000000..621298b352b2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_POOL_H_
+#define MLX5HWS_POOL_H_
+
+#define MLX5HWS_POOL_STC_LOG_SZ 15
+
+#define MLX5HWS_POOL_RESOURCE_ARR_SZ 100
+
+enum mlx5hws_pool_type {
+ MLX5HWS_POOL_TYPE_STE,
+ MLX5HWS_POOL_TYPE_STC,
+};
+
+struct mlx5hws_pool_chunk {
+ u32 resource_idx;
+ /* Internal offset, relative to base index */
+ int offset;
+ int order;
+};
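+
+/* Example (illustrative): a chunk with order = 3 and offset = 16 describes
+ * 2^3 = 8 consecutive objects starting at index 16 of resource[resource_idx].
+ */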
+
+struct mlx5hws_pool_resource {
+ struct mlx5hws_pool *pool;
+ u32 base_id;
+ u32 range;
+};
+
+enum mlx5hws_pool_flags {
+	/* Only one resource in the pool */
+	MLX5HWS_POOL_FLAGS_ONE_RESOURCE = 1 << 0,
+	MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE = 1 << 1,
+	/* Resources are not shared between chunks */
+	MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK = 1 << 2,
+	/* All objects have the same size */
+ MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS = 1 << 3,
+ /* Managed by buddy allocator */
+ MLX5HWS_POOL_FLAGS_BUDDY_MANAGED = 1 << 4,
+ /* Allocate pool_type memory on pool creation */
+ MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE = 1 << 5,
+
+ /* These values should be used by the caller */
+ MLX5HWS_POOL_FLAGS_FOR_STC_POOL =
+ MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS,
+ MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL =
+ MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK,
+ MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL =
+ MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_BUDDY_MANAGED |
+ MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE,
+};
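+
+/* The FOR_* combinations above map onto enum mlx5hws_db_type below: the
+ * matcher-STE flags select the general db, the STC flags the one-size db,
+ * and the STE-action flags fall through to the buddy db, mirroring the
+ * selection logic in mlx5hws_pool_create().
+ */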
+
+enum mlx5hws_pool_optimize {
+ MLX5HWS_POOL_OPTIMIZE_NONE = 0x0,
+ MLX5HWS_POOL_OPTIMIZE_ORIG = 0x1,
+ MLX5HWS_POOL_OPTIMIZE_MIRROR = 0x2,
+};
+
+struct mlx5hws_pool_attr {
+ enum mlx5hws_pool_type pool_type;
+ enum mlx5hws_table_type table_type;
+ enum mlx5hws_pool_flags flags;
+ enum mlx5hws_pool_optimize opt_type;
+ /* Allocation size once memory is depleted */
+ size_t alloc_log_sz;
+};
+
+enum mlx5hws_db_type {
+	/* Used for big memory chunks; each element has its own FW resource */
+	MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE,
+	/* One resource only; all elements have the same, single size */
+	MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE,
+	/* Many resources; memory is allocated with the buddy mechanism */
+	MLX5HWS_POOL_DB_TYPE_BUDDY,
+};
+
+struct mlx5hws_buddy_manager {
+ struct mlx5hws_buddy_mem *buddies[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+};
+
+struct mlx5hws_pool_elements {
+ u32 num_of_elements;
+ unsigned long *bitmap;
+ u32 log_size;
+ bool is_full;
+};
+
+struct mlx5hws_element_manager {
+ struct mlx5hws_pool_elements *elements[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+};
+
+struct mlx5hws_pool_db {
+ enum mlx5hws_db_type type;
+ union {
+ struct mlx5hws_element_manager *element_manager;
+ struct mlx5hws_buddy_manager *buddy_manager;
+ };
+};
+
+typedef int (*mlx5hws_pool_db_get_chunk)(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_db_put_chunk)(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_uninit_db)(struct mlx5hws_pool *pool);
+
+struct mlx5hws_pool {
+ struct mlx5hws_context *ctx;
+ enum mlx5hws_pool_type type;
+ enum mlx5hws_pool_flags flags;
+ struct mutex lock; /* protect the pool */
+ size_t alloc_log_sz;
+ enum mlx5hws_table_type tbl_type;
+ enum mlx5hws_pool_optimize opt_type;
+ struct mlx5hws_pool_resource *resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+ struct mlx5hws_pool_resource *mirror_resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+ /* DB */
+ struct mlx5hws_pool_db db;
+ /* Functions */
+	mlx5hws_pool_uninit_db p_db_uninit;
+ mlx5hws_pool_db_get_chunk p_get_chunk;
+ mlx5hws_pool_db_put_chunk p_put_chunk;
+};
+
+struct mlx5hws_pool *
+mlx5hws_pool_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_pool_attr *pool_attr);
+
+int mlx5hws_pool_destroy(struct mlx5hws_pool *pool);
+
+int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+
+void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+
+static inline u32
+mlx5hws_pool_chunk_get_base_id(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ return pool->resource[chunk->resource_idx]->base_id;
+}
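+
+/* The absolute FW object id of a chunk is presumably base_id + chunk->offset,
+ * with offset relative to the backing resource (see mlx5hws_pool_chunk).
+ */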
+
+static inline u32
+mlx5hws_pool_chunk_get_base_mirror_id(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ return pool->mirror_resource[chunk->resource_idx]->base_id;
+}
+#endif /* MLX5HWS_POOL_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h
new file mode 100644
index 000000000000..de92cecbeb92
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h
@@ -0,0 +1,514 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5_PRM_H_
+#define MLX5_PRM_H_
+
+#define MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY 512
+
+/* Action type of header modification. */
+enum {
+ MLX5_MODIFICATION_TYPE_SET = 0x1,
+ MLX5_MODIFICATION_TYPE_ADD = 0x2,
+ MLX5_MODIFICATION_TYPE_COPY = 0x3,
+ MLX5_MODIFICATION_TYPE_INSERT = 0x4,
+ MLX5_MODIFICATION_TYPE_REMOVE = 0x5,
+ MLX5_MODIFICATION_TYPE_NOP = 0x6,
+ MLX5_MODIFICATION_TYPE_REMOVE_WORDS = 0x7,
+ MLX5_MODIFICATION_TYPE_ADD_FIELD = 0x8,
+ MLX5_MODIFICATION_TYPE_MAX,
+};
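+
+/* Illustrative encoding of a single 8B SET action, using the set_action_in
+ * layout assumed to come from mlx5_ifc.h:
+ *   MLX5_SET(set_action_in, action, action_type, MLX5_MODIFICATION_TYPE_SET);
+ *   MLX5_SET(set_action_in, action, field, MLX5_MODI_OUT_IPV4_TTL);
+ *   MLX5_SET(set_action_in, action, data, 64);
+ */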
+
+/* The field of packet to be modified. */
+enum mlx5_modification_field {
+ MLX5_MODI_OUT_NONE = -1,
+ MLX5_MODI_OUT_SMAC_47_16 = 1,
+ MLX5_MODI_OUT_SMAC_15_0,
+ MLX5_MODI_OUT_ETHERTYPE,
+ MLX5_MODI_OUT_DMAC_47_16,
+ MLX5_MODI_OUT_DMAC_15_0,
+ MLX5_MODI_OUT_IP_DSCP,
+ MLX5_MODI_OUT_TCP_FLAGS,
+ MLX5_MODI_OUT_TCP_SPORT,
+ MLX5_MODI_OUT_TCP_DPORT,
+ MLX5_MODI_OUT_IPV4_TTL,
+ MLX5_MODI_OUT_UDP_SPORT,
+ MLX5_MODI_OUT_UDP_DPORT,
+ MLX5_MODI_OUT_SIPV6_127_96,
+ MLX5_MODI_OUT_SIPV6_95_64,
+ MLX5_MODI_OUT_SIPV6_63_32,
+ MLX5_MODI_OUT_SIPV6_31_0,
+ MLX5_MODI_OUT_DIPV6_127_96,
+ MLX5_MODI_OUT_DIPV6_95_64,
+ MLX5_MODI_OUT_DIPV6_63_32,
+ MLX5_MODI_OUT_DIPV6_31_0,
+ MLX5_MODI_OUT_SIPV4,
+ MLX5_MODI_OUT_DIPV4,
+ MLX5_MODI_OUT_FIRST_VID,
+ MLX5_MODI_IN_SMAC_47_16 = 0x31,
+ MLX5_MODI_IN_SMAC_15_0,
+ MLX5_MODI_IN_ETHERTYPE,
+ MLX5_MODI_IN_DMAC_47_16,
+ MLX5_MODI_IN_DMAC_15_0,
+ MLX5_MODI_IN_IP_DSCP,
+ MLX5_MODI_IN_TCP_FLAGS,
+ MLX5_MODI_IN_TCP_SPORT,
+ MLX5_MODI_IN_TCP_DPORT,
+ MLX5_MODI_IN_IPV4_TTL,
+ MLX5_MODI_IN_UDP_SPORT,
+ MLX5_MODI_IN_UDP_DPORT,
+ MLX5_MODI_IN_SIPV6_127_96,
+ MLX5_MODI_IN_SIPV6_95_64,
+ MLX5_MODI_IN_SIPV6_63_32,
+ MLX5_MODI_IN_SIPV6_31_0,
+ MLX5_MODI_IN_DIPV6_127_96,
+ MLX5_MODI_IN_DIPV6_95_64,
+ MLX5_MODI_IN_DIPV6_63_32,
+ MLX5_MODI_IN_DIPV6_31_0,
+ MLX5_MODI_IN_SIPV4,
+ MLX5_MODI_IN_DIPV4,
+ MLX5_MODI_OUT_IPV6_HOPLIMIT,
+ MLX5_MODI_IN_IPV6_HOPLIMIT,
+ MLX5_MODI_META_DATA_REG_A,
+ MLX5_MODI_META_DATA_REG_B = 0x50,
+ MLX5_MODI_META_REG_C_0,
+ MLX5_MODI_META_REG_C_1,
+ MLX5_MODI_META_REG_C_2,
+ MLX5_MODI_META_REG_C_3,
+ MLX5_MODI_META_REG_C_4,
+ MLX5_MODI_META_REG_C_5,
+ MLX5_MODI_META_REG_C_6,
+ MLX5_MODI_META_REG_C_7,
+ MLX5_MODI_OUT_TCP_SEQ_NUM,
+ MLX5_MODI_IN_TCP_SEQ_NUM,
+ MLX5_MODI_OUT_TCP_ACK_NUM,
+ MLX5_MODI_IN_TCP_ACK_NUM = 0x5C,
+ MLX5_MODI_GTP_TEID = 0x6E,
+ MLX5_MODI_OUT_IP_ECN = 0x73,
+ MLX5_MODI_TUNNEL_HDR_DW_1 = 0x75,
+ MLX5_MODI_GTPU_FIRST_EXT_DW_0 = 0x76,
+ MLX5_MODI_HASH_RESULT = 0x81,
+ MLX5_MODI_IN_MPLS_LABEL_0 = 0x8a,
+ MLX5_MODI_IN_MPLS_LABEL_1,
+ MLX5_MODI_IN_MPLS_LABEL_2,
+ MLX5_MODI_IN_MPLS_LABEL_3,
+ MLX5_MODI_IN_MPLS_LABEL_4,
+ MLX5_MODI_OUT_IP_PROTOCOL = 0x4A,
+ MLX5_MODI_OUT_IPV6_NEXT_HDR = 0x4A,
+ MLX5_MODI_META_REG_C_8 = 0x8F,
+ MLX5_MODI_META_REG_C_9 = 0x90,
+ MLX5_MODI_META_REG_C_10 = 0x91,
+ MLX5_MODI_META_REG_C_11 = 0x92,
+ MLX5_MODI_META_REG_C_12 = 0x93,
+ MLX5_MODI_META_REG_C_13 = 0x94,
+ MLX5_MODI_META_REG_C_14 = 0x95,
+ MLX5_MODI_META_REG_C_15 = 0x96,
+ MLX5_MODI_OUT_IPV4_TOTAL_LEN = 0x11D,
+ MLX5_MODI_OUT_IPV6_PAYLOAD_LEN = 0x11E,
+ MLX5_MODI_OUT_IPV4_IHL = 0x11F,
+ MLX5_MODI_OUT_TCP_DATA_OFFSET = 0x120,
+ MLX5_MODI_OUT_ESP_SPI = 0x5E,
+ MLX5_MODI_OUT_ESP_SEQ_NUM = 0x82,
+ MLX5_MODI_OUT_IPSEC_NEXT_HDR = 0x126,
+ MLX5_MODI_INVALID = INT_MAX,
+};
+
+enum {
+ MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE = 0x7 << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE = 0x8 << 1,
+ MLX5_SET_HCA_CAP_OP_MOD_ESW = 0x9 << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE = 0x1B << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 = 0x20 << 1,
+};
+
+enum mlx5_ifc_rtc_update_mode {
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH = 0x0,
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET = 0x1,
+};
+
+enum mlx5_ifc_rtc_access_mode {
+ MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH = 0x0,
+ MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR = 0x1,
+};
+
+enum mlx5_ifc_rtc_ste_format {
+ MLX5_IFC_RTC_STE_FORMAT_8DW = 0x4,
+ MLX5_IFC_RTC_STE_FORMAT_11DW = 0x5,
+ MLX5_IFC_RTC_STE_FORMAT_RANGE = 0x7,
+};
+
+enum mlx5_ifc_rtc_reparse_mode {
+ MLX5_IFC_RTC_REPARSE_NEVER = 0x0,
+ MLX5_IFC_RTC_REPARSE_ALWAYS = 0x1,
+ MLX5_IFC_RTC_REPARSE_BY_STC = 0x2,
+};
+
+#define MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX 16
+
+struct mlx5_ifc_rtc_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x40];
+ u8 update_index_mode[0x2];
+ u8 reparse_mode[0x2];
+ u8 num_match_ste[0x4];
+ u8 pd[0x18];
+ u8 reserved_at_a0[0x9];
+ u8 access_index_mode[0x3];
+ u8 num_hash_definer[0x4];
+ u8 update_method[0x1];
+ u8 reserved_at_b1[0x2];
+ u8 log_depth[0x5];
+ u8 log_hash_size[0x8];
+ u8 ste_format_0[0x8];
+ u8 table_type[0x8];
+ u8 ste_format_1[0x8];
+ u8 reserved_at_d8[0x8];
+ u8 match_definer_0[0x20];
+ u8 stc_id[0x20];
+ u8 ste_table_base_id[0x20];
+ u8 ste_table_offset[0x20];
+ u8 reserved_at_160[0x8];
+ u8 miss_flow_table_id[0x18];
+ u8 match_definer_1[0x20];
+ u8 reserved_at_1a0[0x260];
+};
+
+enum mlx5_ifc_stc_action_type {
+ MLX5_IFC_STC_ACTION_TYPE_NOP = 0x00,
+ MLX5_IFC_STC_ACTION_TYPE_COPY = 0x05,
+ MLX5_IFC_STC_ACTION_TYPE_SET = 0x06,
+ MLX5_IFC_STC_ACTION_TYPE_ADD = 0x07,
+ MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS = 0x08,
+ MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE = 0x09,
+ MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT = 0x0b,
+ MLX5_IFC_STC_ACTION_TYPE_TAG = 0x0c,
+ MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST = 0x0e,
+ MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION = 0x10,
+ MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION = 0x11,
+ MLX5_IFC_STC_ACTION_TYPE_ASO = 0x12,
+ MLX5_IFC_STC_ACTION_TYPE_TRAILER = 0x13,
+ MLX5_IFC_STC_ACTION_TYPE_COUNTER = 0x14,
+ MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD = 0x1b,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE = 0x80,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR = 0x81,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT = 0x82,
+ MLX5_IFC_STC_ACTION_TYPE_DROP = 0x83,
+ MLX5_IFC_STC_ACTION_TYPE_ALLOW = 0x84,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT = 0x85,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK = 0x86,
+};
+
+enum mlx5_ifc_stc_reparse_mode {
+ MLX5_IFC_STC_REPARSE_IGNORE = 0x0,
+ MLX5_IFC_STC_REPARSE_NEVER = 0x1,
+ MLX5_IFC_STC_REPARSE_ALWAYS = 0x2,
+};
+
+struct mlx5_ifc_stc_ste_param_ste_table_bits {
+ u8 ste_obj_id[0x20];
+ u8 match_definer_id[0x20];
+ u8 reserved_at_40[0x3];
+ u8 log_hash_size[0x5];
+ u8 reserved_at_48[0x38];
+};
+
+struct mlx5_ifc_stc_ste_param_tir_bits {
+ u8 reserved_at_0[0x8];
+ u8 tirn[0x18];
+ u8 reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_stc_ste_param_table_bits {
+ u8 reserved_at_0[0x8];
+ u8 table_id[0x18];
+ u8 reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_stc_ste_param_flow_counter_bits {
+ u8 flow_counter_id[0x20];
+};
+
+enum {
+ MLX5_ASO_CT_NUM_PER_OBJ = 1,
+ MLX5_ASO_METER_NUM_PER_OBJ = 2,
+ MLX5_ASO_IPSEC_NUM_PER_OBJ = 1,
+ MLX5_ASO_FIRST_HIT_NUM_PER_OBJ = 512,
+};
+
+struct mlx5_ifc_stc_ste_param_execute_aso_bits {
+ u8 aso_object_id[0x20];
+ u8 return_reg_id[0x4];
+ u8 aso_type[0x4];
+ u8 reserved_at_28[0x18];
+};
+
+struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits {
+ u8 ipsec_object_id[0x20];
+};
+
+struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits {
+ u8 ipsec_object_id[0x20];
+};
+
+struct mlx5_ifc_stc_ste_param_trailer_bits {
+ u8 reserved_at_0[0x8];
+ u8 command[0x4];
+ u8 reserved_at_c[0x2];
+ u8 type[0x2];
+ u8 reserved_at_10[0xa];
+ u8 length[0x6];
+};
+
+struct mlx5_ifc_stc_ste_param_header_modify_list_bits {
+ u8 header_modify_pattern_id[0x20];
+ u8 header_modify_argument_id[0x20];
+};
+
+enum mlx5_ifc_header_anchors {
+ MLX5_HEADER_ANCHOR_PACKET_START = 0x0,
+ MLX5_HEADER_ANCHOR_MAC = 0x1,
+ MLX5_HEADER_ANCHOR_FIRST_VLAN_START = 0x2,
+ MLX5_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
+ MLX5_HEADER_ANCHOR_ESP = 0x08,
+ MLX5_HEADER_ANCHOR_TCP_UDP = 0x09,
+ MLX5_HEADER_ANCHOR_TUNNEL_HEADER = 0x0a,
+ MLX5_HEADER_ANCHOR_INNER_MAC = 0x13,
+ MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
+ MLX5_HEADER_ANCHOR_INNER_TCP_UDP = 0x1a,
+ MLX5_HEADER_ANCHOR_L4_PAYLOAD = 0x1b,
+ MLX5_HEADER_ANCHOR_INNER_L4_PAYLOAD = 0x1c
+};
+
+struct mlx5_ifc_stc_ste_param_remove_bits {
+ u8 action_type[0x4];
+ u8 decap[0x1];
+ u8 reserved_at_5[0x5];
+ u8 remove_start_anchor[0x6];
+ u8 reserved_at_10[0x2];
+ u8 remove_end_anchor[0x6];
+ u8 reserved_at_18[0x8];
+};
+
+struct mlx5_ifc_stc_ste_param_remove_words_bits {
+ u8 action_type[0x4];
+ u8 reserved_at_4[0x6];
+ u8 remove_start_anchor[0x6];
+ u8 reserved_at_10[0x1];
+ u8 remove_offset[0x7];
+ u8 reserved_at_18[0x2];
+ u8 remove_size[0x6];
+};
+
+struct mlx5_ifc_stc_ste_param_insert_bits {
+ u8 action_type[0x4];
+ u8 encap[0x1];
+ u8 inline_data[0x1];
+ u8 reserved_at_6[0x4];
+ u8 insert_anchor[0x6];
+ u8 reserved_at_10[0x1];
+ u8 insert_offset[0x7];
+ u8 reserved_at_18[0x1];
+ u8 insert_size[0x7];
+ u8 insert_argument[0x20];
+};
+
+struct mlx5_ifc_stc_ste_param_vport_bits {
+ u8 eswitch_owner_vhca_id[0x10];
+ u8 vport_number[0x10];
+ u8 eswitch_owner_vhca_id_valid[0x1];
+ u8 reserved_at_21[0x5f];
+};
+
+union mlx5_ifc_stc_param_bits {
+ struct mlx5_ifc_stc_ste_param_ste_table_bits ste_table;
+ struct mlx5_ifc_stc_ste_param_tir_bits tir;
+ struct mlx5_ifc_stc_ste_param_table_bits table;
+ struct mlx5_ifc_stc_ste_param_flow_counter_bits counter;
+ struct mlx5_ifc_stc_ste_param_header_modify_list_bits modify_header;
+ struct mlx5_ifc_stc_ste_param_execute_aso_bits aso;
+ struct mlx5_ifc_stc_ste_param_remove_bits remove_header;
+ struct mlx5_ifc_stc_ste_param_insert_bits insert_header;
+ struct mlx5_ifc_set_action_in_bits add;
+ struct mlx5_ifc_set_action_in_bits set;
+ struct mlx5_ifc_copy_action_in_bits copy;
+ struct mlx5_ifc_stc_ste_param_vport_bits vport;
+ struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits ipsec_encrypt;
+ struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits ipsec_decrypt;
+ struct mlx5_ifc_stc_ste_param_trailer_bits trailer;
+ u8 reserved_at_0[0x80];
+};
+
+enum {
+ MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC = BIT(0),
+};
+
+struct mlx5_ifc_stc_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x46];
+ u8 reparse_mode[0x2];
+ u8 table_type[0x8];
+ u8 ste_action_offset[0x8];
+ u8 action_type[0x8];
+ u8 reserved_at_a0[0x60];
+ union mlx5_ifc_stc_param_bits stc_param;
+ u8 reserved_at_180[0x280];
+};
+
+struct mlx5_ifc_ste_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x48];
+ u8 table_type[0x8];
+ u8 reserved_at_90[0x370];
+};
+
+struct mlx5_ifc_definer_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x50];
+ u8 format_id[0x10];
+ u8 reserved_at_60[0x60];
+ u8 format_select_dw3[0x8];
+ u8 format_select_dw2[0x8];
+ u8 format_select_dw1[0x8];
+ u8 format_select_dw0[0x8];
+ u8 format_select_dw7[0x8];
+ u8 format_select_dw6[0x8];
+ u8 format_select_dw5[0x8];
+ u8 format_select_dw4[0x8];
+ u8 reserved_at_100[0x18];
+ u8 format_select_dw8[0x8];
+ u8 reserved_at_120[0x20];
+ u8 format_select_byte3[0x8];
+ u8 format_select_byte2[0x8];
+ u8 format_select_byte1[0x8];
+ u8 format_select_byte0[0x8];
+ u8 format_select_byte7[0x8];
+ u8 format_select_byte6[0x8];
+ u8 format_select_byte5[0x8];
+ u8 format_select_byte4[0x8];
+ u8 reserved_at_180[0x40];
+ u8 ctrl[0xa0];
+ u8 match_mask[0x160];
+};
+
+struct mlx5_ifc_arg_bits {
+ u8 rsvd0[0x88];
+ u8 access_pd[0x18];
+};
+
+struct mlx5_ifc_header_modify_pattern_in_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 pattern_length[0x8];
+ u8 reserved_at_88[0x18];
+
+ u8 reserved_at_a0[0x60];
+
+ u8 pattern_data[MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY * 8];
+};
+
+struct mlx5_ifc_create_rtc_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_rtc_bits rtc;
+};
+
+struct mlx5_ifc_create_stc_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_stc_bits stc;
+};
+
+struct mlx5_ifc_create_ste_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_ste_bits ste;
+};
+
+struct mlx5_ifc_create_definer_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_definer_bits definer;
+};
+
+struct mlx5_ifc_create_arg_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_arg_bits arg;
+};
+
+struct mlx5_ifc_create_header_modify_pattern_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_header_modify_pattern_in_bits pattern;
+};
+
+struct mlx5_ifc_generate_wqe_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mode[0x10];
+ u8 reserved_at_40[0x40];
+ u8 reserved_at_80[0x8];
+ u8 pdn[0x18];
+ u8 reserved_at_a0[0x160];
+ u8 wqe_ctrl[0x80];
+ u8 wqe_gta_ctrl[0x180];
+ u8 wqe_gta_data_0[0x200];
+ u8 wqe_gta_data_1[0x200];
+};
+
+struct mlx5_ifc_generate_wqe_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x1c0];
+ u8 cqe_data[0x200];
+};
+
+enum mlx5_access_aso_opc_mod {
+ ASO_OPC_MOD_IPSEC = 0x0,
+ ASO_OPC_MOD_CONNECTION_TRACKING = 0x1,
+ ASO_OPC_MOD_POLICER = 0x2,
+ ASO_OPC_MOD_RACE_AVOIDANCE = 0x3,
+ ASO_OPC_MOD_FLOW_HIT = 0x4,
+};
+
+enum {
+ MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION = BIT(0),
+ MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID = BIT(1),
+};
+
+enum {
+ MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT = 0,
+ MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL = 1,
+};
+
+struct mlx5_ifc_alloc_packet_reformat_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_dealloc_packet_reformat_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_dealloc_packet_reformat_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+#endif /* MLX5_PRM_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c
new file mode 100644
index 000000000000..8a011b958b43
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+static void hws_rule_skip(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_match_template *mt,
+ u32 flow_source,
+ bool *skip_rx, bool *skip_tx)
+{
+ /* By default FDB rules are added to both RX and TX */
+ *skip_rx = false;
+ *skip_tx = false;
+
+ if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT) {
+ *skip_rx = true;
+ } else if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK) {
+ *skip_tx = true;
+ } else {
+ /* If no flow source was set for current rule,
+ * check for flow source in matcher attributes.
+ */
+ if (matcher->attr.optimize_flow_src) {
+ *skip_tx =
+ matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE;
+ *skip_rx =
+ matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT;
+ return;
+ }
+ }
+}
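+
+/* E.g. a rule whose flow source is the local vport never needs the RX side
+ * STEs, so those writes are skipped; an uplink flow source skips TX instead.
+ */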
+
+static void
+hws_rule_update_copy_tag(struct mlx5hws_rule *rule,
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
+ bool is_jumbo)
+{
+ struct mlx5hws_rule_match_tag *tag;
+
+ if (!mlx5hws_matcher_is_resizable(rule->matcher)) {
+ tag = &rule->tag;
+ } else {
+ struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
+ (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
+ tag = (struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
+ }
+
+ if (is_jumbo)
+ memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+ else
+ memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
+}
+
+static void hws_rule_init_dep_wqe(struct mlx5hws_send_ring_dep_wqe *dep_wqe,
+ struct mlx5hws_rule *rule,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ bool skip_rx, skip_tx;
+
+ dep_wqe->rule = rule;
+ dep_wqe->user_data = attr->user_data;
+ dep_wqe->direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
+ attr->rule_idx : 0;
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ hws_rule_skip(matcher, mt, attr->flow_source, &skip_rx, &skip_tx);
+
+ if (!skip_rx) {
+ dep_wqe->rtc_0 = matcher->match_ste.rtc_0_id;
+ dep_wqe->retry_rtc_0 = matcher->col_matcher ?
+ matcher->col_matcher->match_ste.rtc_0_id : 0;
+ } else {
+ dep_wqe->rtc_0 = 0;
+ dep_wqe->retry_rtc_0 = 0;
+ }
+
+ if (!skip_tx) {
+ dep_wqe->rtc_1 = matcher->match_ste.rtc_1_id;
+ dep_wqe->retry_rtc_1 = matcher->col_matcher ?
+ matcher->col_matcher->match_ste.rtc_1_id : 0;
+ } else {
+ dep_wqe->rtc_1 = 0;
+ dep_wqe->retry_rtc_1 = 0;
+ }
+ } else {
+ pr_warn("HWS: invalid tbl->type: %d\n", tbl->type);
+ }
+}
+
+static void hws_rule_move_get_rtc(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_matcher *dst_matcher = rule->matcher->resize_dst;
+
+ if (rule->resize_info->rtc_0) {
+ ste_attr->rtc_0 = dst_matcher->match_ste.rtc_0_id;
+ ste_attr->retry_rtc_0 = dst_matcher->col_matcher ?
+ dst_matcher->col_matcher->match_ste.rtc_0_id : 0;
+ }
+ if (rule->resize_info->rtc_1) {
+ ste_attr->rtc_1 = dst_matcher->match_ste.rtc_1_id;
+ ste_attr->retry_rtc_1 = dst_matcher->col_matcher ?
+ dst_matcher->col_matcher->match_ste.rtc_1_id : 0;
+ }
+}
+
+static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_rule *rule,
+ bool err,
+ void *user_data,
+ enum mlx5hws_rule_status rule_status_on_succ)
+{
+ enum mlx5hws_flow_op_status comp_status;
+
+ if (!err) {
+ comp_status = MLX5HWS_FLOW_OP_SUCCESS;
+ rule->status = rule_status_on_succ;
+ } else {
+ comp_status = MLX5HWS_FLOW_OP_ERROR;
+ rule->status = MLX5HWS_RULE_STATUS_FAILED;
+ }
+
+ mlx5hws_send_engine_inc_rule(queue);
+ mlx5hws_send_engine_gen_comp(queue, user_data, comp_status);
+}
+
+static void
+hws_rule_save_resize_info(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr,
+ bool is_update)
+{
+ if (!mlx5hws_matcher_is_resizable(rule->matcher))
+ return;
+
+ if (likely(!is_update)) {
+ rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL);
+ if (unlikely(!rule->resize_info)) {
+ pr_warn("HWS: resize info isn't allocated for rule\n");
+ return;
+ }
+
+ rule->resize_info->max_stes =
+ rule->matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
+ rule->resize_info->action_ste_pool[0] = rule->matcher->action_ste[0].max_stes ?
+ rule->matcher->action_ste[0].pool :
+ NULL;
+ rule->resize_info->action_ste_pool[1] = rule->matcher->action_ste[1].max_stes ?
+ rule->matcher->action_ste[1].pool :
+ NULL;
+ }
+
+ memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl,
+ sizeof(rule->resize_info->ctrl_seg));
+ memcpy(rule->resize_info->data_seg, ste_attr->wqe_data,
+ sizeof(rule->resize_info->data_seg));
+}
+
+void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule)
+{
+ if (mlx5hws_matcher_is_resizable(rule->matcher) &&
+ rule->resize_info) {
+ kfree(rule->resize_info);
+ rule->resize_info = NULL;
+ }
+}
+
+static void
+hws_rule_save_delete_info(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_match_template *mt = rule->matcher->mt;
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+
+ if (mlx5hws_matcher_is_resizable(rule->matcher))
+ return;
+
+ if (is_jumbo)
+ memcpy(&rule->tag.jumbo, ste_attr->wqe_data->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+ else
+ memcpy(&rule->tag.match, ste_attr->wqe_data->tag, MLX5HWS_MATCH_TAG_SZ);
+}
+
+static void
+hws_rule_clear_delete_info(struct mlx5hws_rule *rule)
+{
+ /* nothing to do here */
+}
+
+static void
+hws_rule_load_delete_info(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher))) {
+ ste_attr->wqe_tag = &rule->tag;
+ } else {
+ struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
+ (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
+ struct mlx5hws_rule_match_tag *tag =
+ (struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
+ ste_attr->wqe_tag = tag;
+ }
+}
+
+static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule,
+ u8 action_ste_selector)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_pool_chunk ste = {0};
+ int ret;
+
+ action_ste = &matcher->action_ste[action_ste_selector];
+ ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes));
+ ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste);
+ if (unlikely(ret)) {
+ mlx5hws_err(matcher->tbl->ctx,
+ "Failed to allocate STE for rule actions");
+ return ret;
+ }
+ rule->action_ste_idx = ste.offset;
+
+ return 0;
+}
+
+static void hws_rule_free_action_ste_idx(struct mlx5hws_rule *rule,
+ u8 action_ste_selector)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_pool_chunk ste = {0};
+ struct mlx5hws_pool *pool;
+ u8 max_stes;
+
+ if (mlx5hws_matcher_is_resizable(matcher)) {
+ /* Free the original action pool if rule was resized */
+ max_stes = rule->resize_info->max_stes;
+ pool = rule->resize_info->action_ste_pool[action_ste_selector];
+ } else {
+ max_stes = matcher->action_ste[action_ste_selector].max_stes;
+ pool = matcher->action_ste[action_ste_selector].pool;
+ }
+
+ /* This release is safe only when the rule match part was deleted */
+ ste.order = ilog2(roundup_pow_of_two(max_stes));
+ ste.offset = rule->action_ste_idx;
+
+ mlx5hws_pool_chunk_free(pool, &ste);
+}
+
+static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ int action_ste_idx;
+ int ret;
+
+ ret = hws_rule_alloc_action_ste_idx(rule, 0);
+ if (unlikely(ret))
+ return ret;
+
+ action_ste_idx = rule->action_ste_idx;
+
+ ret = hws_rule_alloc_action_ste_idx(rule, 1);
+ if (unlikely(ret)) {
+ hws_rule_free_action_ste_idx(rule, 0);
+ return ret;
+ }
+
+ /* Both pools have to return the same index */
+ if (unlikely(rule->action_ste_idx != action_ste_idx)) {
+ pr_warn("HWS: allocation of action STE failed - pool indexes mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
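+
+/* Both action-STE pools are allocated in lockstep so that the single
+ * rule->action_ste_idx can address the rule's STEs in either pool; an index
+ * mismatch would leave one selector pointing at foreign STEs, hence the check.
+ */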
+
+void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule)
+{
+ if (rule->action_ste_idx > -1) {
+ hws_rule_free_action_ste_idx(rule, 1);
+ hws_rule_free_action_ste_idx(rule, 0);
+ }
+}
+
+static void hws_rule_create_init(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr,
+ struct mlx5hws_actions_apply_data *apply,
+ bool is_update)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+
+	/* Init rule before reuse */
+	if (!is_update) {
+		/* These RTCs are kept as-is in the update flow */
+ rule->rtc_0 = 0;
+ rule->rtc_1 = 0;
+ rule->action_ste_selector = 0;
+ } else {
+ rule->action_ste_selector = !rule->action_ste_selector;
+ }
+
+ rule->pending_wqes = 0;
+ rule->action_ste_idx = -1;
+ rule->status = MLX5HWS_RULE_STATUS_CREATING;
+
+ /* Init default send STE attributes */
+ ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+ ste_attr->send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr->send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr->send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+
+ /* Init default action apply */
+ apply->tbl_type = tbl->type;
+ apply->common_res = &ctx->common_res[tbl->type];
+ apply->jump_to_action_stc = matcher->action_ste[0].stc.offset;
+ apply->require_dep = 0;
+}
+
+static void hws_rule_move_init(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ /* Save the old RTC IDs to be later used in match STE delete */
+ rule->resize_info->rtc_0 = rule->rtc_0;
+ rule->resize_info->rtc_1 = rule->rtc_1;
+ rule->resize_info->rule_idx = attr->rule_idx;
+
+ rule->rtc_0 = 0;
+ rule->rtc_1 = 0;
+
+ rule->pending_wqes = 0;
+ rule->action_ste_idx = -1;
+ rule->action_ste_selector = 0;
+ rule->status = MLX5HWS_RULE_STATUS_CREATING;
+ rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING;
+}
+
+bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule)
+{
+ return mlx5hws_matcher_is_in_resize(rule->matcher) &&
+ rule->resize_info &&
+ rule->resize_info->state != MLX5HWS_RULE_RESIZE_STATE_IDLE;
+}
+
+static int hws_rule_create_hws(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr,
+ u8 mt_idx,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_action_template *at = &rule->matcher->at[at_idx];
+ struct mlx5hws_match_template *mt = &rule->matcher->mt[mt_idx];
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+ struct mlx5hws_actions_wqe_setter *setter;
+ struct mlx5hws_actions_apply_data apply;
+ struct mlx5hws_send_engine *queue;
+ u8 total_stes, action_stes;
+ bool is_update;
+ int i, ret;
+
+ is_update = !match_param;
+
+ setter = &at->setters[at->num_of_action_stes];
+ total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term);
+ action_stes = total_stes - 1;
+
+ queue = &ctx->send_queue[attr->queue_id];
+ if (unlikely(mlx5hws_send_engine_err(queue)))
+ return -EIO;
+
+ hws_rule_create_init(rule, &ste_attr, &apply, is_update);
+
+ /* Allocate dependent match WQE since rule might have dependent writes.
+ * The queued dependent WQE can be later aborted or kept as a dependency.
+ * dep_wqe buffers (ctrl, data) are also reused for all STE writes.
+ */
+ dep_wqe = mlx5hws_send_add_new_dep_wqe(queue);
+ hws_rule_init_dep_wqe(dep_wqe, rule, mt, attr);
+
+ ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ ste_attr.wqe_data = &dep_wqe->wqe_data;
+ apply.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ apply.wqe_data = (__force __be32 *)&dep_wqe->wqe_data;
+ apply.rule_action = rule_actions;
+ apply.queue = queue;
+
+ if (action_stes) {
+ /* Allocate action STEs for rules that need more than a match STE */
+ if (!is_update) {
+ ret = hws_rule_alloc_action_ste(rule, attr);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
+ mlx5hws_send_abort_new_dep_wqe(queue);
+ return ret;
+ }
+ }
+ /* Skip RX/TX based on the dep_wqe init */
+ ste_attr.rtc_0 = dep_wqe->rtc_0 ?
+ matcher->action_ste[rule->action_ste_selector].rtc_0_id : 0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1 ?
+ matcher->action_ste[rule->action_ste_selector].rtc_1_id : 0;
+ /* Action STEs are written to a specific index last to first */
+ ste_attr.direct_index = rule->action_ste_idx + action_stes;
+ apply.next_direct_idx = ste_attr.direct_index;
+ } else {
+ apply.next_direct_idx = 0;
+ }
+
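+ /* Write the STEs from last to first: action STEs go in first (at
+ * the highest indexes), and the match STE is handled last so the
+ * rule only becomes active once its actions are in place.
+ */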
+ for (i = total_stes; i-- > 0;) {
+ mlx5hws_action_apply_setter(&apply, setter--, !i && is_jumbo);
+
+ if (i == 0) {
+ /* Handle last match STE.
+ * For hash split / linear lookup RTCs, packets reaching any STE
+ * will always match and perform the specified actions, which
+ * makes the tag irrelevant.
+ */
+ if (likely(!mlx5hws_matcher_is_insert_by_idx(matcher) && !is_update))
+ mlx5hws_definer_create_tag(match_param, mt->fc, mt->fc_sz,
+ (u8 *)dep_wqe->wqe_data.action);
+ else if (is_update)
+ hws_rule_update_copy_tag(rule, &dep_wqe->wqe_data, is_jumbo);
+
+ /* Rule has dependent WQEs, match dep_wqe is queued */
+ if (action_stes || apply.require_dep)
+ break;
+
+ /* Rule has no dependencies, abort dep_wqe and send WQE now */
+ mlx5hws_send_abort_new_dep_wqe(queue);
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+ ste_attr.send_attr.notify_hw = !attr->burst;
+ ste_attr.send_attr.user_data = dep_wqe->user_data;
+ ste_attr.send_attr.rule = dep_wqe->rule;
+ ste_attr.rtc_0 = dep_wqe->rtc_0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
+ ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
+ ste_attr.direct_index = dep_wqe->direct_index;
+ } else {
+ apply.next_direct_idx = --ste_attr.direct_index;
+ }
+
+ mlx5hws_send_ste(queue, &ste_attr);
+ }
+
+ /* Back up the TAG on the rule for deletion, and the resize info
+ * for moving rules to a new matcher; both only after insertion.
+ */
+ if (!is_update)
+ hws_rule_save_delete_info(rule, &ste_attr);
+
+ hws_rule_save_resize_info(rule, &ste_attr, is_update);
+ mlx5hws_send_engine_inc_rule(queue);
+
+ if (!attr->burst)
+ mlx5hws_send_all_dep_wqe(queue);
+
+ return 0;
+}
+
+static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5hws_send_engine *queue;
+
+ queue = &ctx->send_queue[attr->queue_id];
+
+ hws_rule_gen_comp(queue, rule, false,
+ attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
+
+ /* Rule failed now we can safely release action STEs */
+ mlx5hws_rule_free_action_ste(rule);
+
+ /* Clear complex tag */
+ hws_rule_clear_delete_info(rule);
+
+ /* Clear info that was saved for resizing */
+ mlx5hws_rule_clear_resize_info(rule);
+
+ /* If a rule that was indicated as burst (need to trigger HW) has failed
+ * insertion, we won't ring the HW as nothing was written to the WQ.
+ * In such a case, update the last WQE and ring the HW with that work.
+ */
+ if (attr->burst)
+ return;
+
+ mlx5hws_send_all_dep_wqe(queue);
+ mlx5hws_send_engine_flush_queue(queue);
+}
+
+static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_engine *queue;
+
+ queue = &ctx->send_queue[attr->queue_id];
+
+ if (unlikely(mlx5hws_send_engine_err(queue))) {
+ hws_rule_destroy_failed_hws(rule, attr);
+ return 0;
+ }
+
+ /* Rule is not completed yet */
+ if (rule->status == MLX5HWS_RULE_STATUS_CREATING)
+ return -EBUSY;
+
+ /* Rule failed and doesn't require cleanup */
+ if (rule->status == MLX5HWS_RULE_STATUS_FAILED) {
+ hws_rule_destroy_failed_hws(rule, attr);
+ return 0;
+ }
+
+ if (rule->skip_delete) {
+ /* Rule shouldn't be deleted in HW.
+ * Generate completion as if write succeeded, and we can
+ * safely release action STEs and clear resize info.
+ */
+ hws_rule_gen_comp(queue, rule, false,
+ attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
+
+ mlx5hws_rule_free_action_ste(rule);
+ mlx5hws_rule_clear_resize_info(rule);
+ return 0;
+ }
+
+ mlx5hws_send_engine_inc_rule(queue);
+
+ /* Send dependent WQE */
+ if (!attr->burst)
+ mlx5hws_send_all_dep_wqe(queue);
+
+ rule->status = MLX5HWS_RULE_STATUS_DELETING;
+
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+
+ ste_attr.send_attr.rule = rule;
+ ste_attr.send_attr.notify_hw = !attr->burst;
+ ste_attr.send_attr.user_data = attr->user_data;
+
+ ste_attr.rtc_0 = rule->rtc_0;
+ ste_attr.rtc_1 = rule->rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.wqe_ctrl = &wqe_ctrl;
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
+ if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
+ ste_attr.direct_index = attr->rule_idx;
+
+ hws_rule_load_delete_info(rule, &ste_attr);
+ mlx5hws_send_ste(queue, &ste_attr);
+ hws_rule_clear_delete_info(rule);
+
+ return 0;
+}
+
+static int hws_rule_enqueue_precheck(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+
+ if (unlikely(!attr->user_data))
+ return -EINVAL;
+
+ /* Check if there is room in queue */
+ if (unlikely(mlx5hws_send_engine_full(&ctx->send_queue[attr->queue_id])))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int hws_rule_enqueue_precheck_move(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
+ return -EINVAL;
+
+ return hws_rule_enqueue_precheck(rule, attr);
+}
+
+static int hws_rule_enqueue_precheck_create(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ if (unlikely(mlx5hws_matcher_is_in_resize(rule->matcher)))
+ /* Matcher in resize - new rules are not allowed */
+ return -EAGAIN;
+
+ return hws_rule_enqueue_precheck(rule, attr);
+}
+
+static int hws_rule_enqueue_precheck_update(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+
+ if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher) &&
+ !matcher->attr.optimize_using_rule_idx &&
+ !mlx5hws_matcher_is_insert_by_idx(matcher))) {
+ return -EOPNOTSUPP;
+ }
+
+ if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
+ return -EBUSY;
+
+ return hws_rule_enqueue_precheck_create(rule, attr);
+}
+
+int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
+ void *queue_ptr,
+ void *user_data)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+ struct mlx5hws_wqe_gta_ctrl_seg empty_wqe_ctrl = {0};
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_send_engine *queue = queue_ptr;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+
+ mlx5hws_send_all_dep_wqe(queue);
+
+ rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_DELETING;
+
+ ste_attr.send_attr.fence = 0;
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.send_attr.rule = rule;
+ ste_attr.send_attr.notify_hw = 1;
+ ste_attr.send_attr.user_data = user_data;
+ ste_attr.rtc_0 = rule->resize_info->rtc_0;
+ ste_attr.rtc_1 = rule->resize_info->rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->resize_info->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->resize_info->rtc_1;
+ ste_attr.wqe_ctrl = &empty_wqe_ctrl;
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
+
+ if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
+ ste_attr.direct_index = rule->resize_info->rule_idx;
+
+ hws_rule_load_delete_info(rule, &ste_attr);
+ mlx5hws_send_ste(queue, &ste_attr);
+
+ return 0;
+}
+
+int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_engine *queue;
+ int ret;
+
+ ret = hws_rule_enqueue_precheck_move(rule, attr);
+ if (unlikely(ret))
+ return ret;
+
+ queue = &ctx->send_queue[attr->queue_id];
+
+ ret = mlx5hws_send_engine_err(queue);
+ if (ret)
+ return ret;
+
+ hws_rule_move_init(rule, attr);
+ hws_rule_move_get_rtc(rule, &ste_attr);
+
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+
+ ste_attr.send_attr.rule = rule;
+ ste_attr.send_attr.fence = 0;
+ ste_attr.send_attr.notify_hw = !attr->burst;
+ ste_attr.send_attr.user_data = attr->user_data;
+
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.wqe_ctrl = (struct mlx5hws_wqe_gta_ctrl_seg *)rule->resize_info->ctrl_seg;
+ ste_attr.wqe_data = (struct mlx5hws_wqe_gta_data_seg_ste *)rule->resize_info->data_seg;
+ ste_attr.direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
+ attr->rule_idx : 0;
+
+ mlx5hws_send_ste(queue, &ste_attr);
+ mlx5hws_send_engine_inc_rule(queue);
+
+ if (!attr->burst)
+ mlx5hws_send_all_dep_wqe(queue);
+
+ return 0;
+}
+
+int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
+ u8 mt_idx,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr,
+ struct mlx5hws_rule *rule_handle)
+{
+ int ret;
+
+ rule_handle->matcher = matcher;
+
+ ret = hws_rule_enqueue_precheck_create(rule_handle, attr);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(mt_idx >= matcher->num_of_mt ||
+ at_idx >= matcher->num_of_at ||
+ !match_param)) {
+ pr_warn("HWS: Invalid rule creation parameters (MTs, ATs or match params)\n");
+ return -EINVAL;
+ }
+
+ ret = hws_rule_create_hws(rule_handle,
+ attr,
+ mt_idx,
+ match_param,
+ at_idx,
+ rule_actions);
+
+ return ret;
+}
+
+int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ int ret;
+
+ ret = hws_rule_enqueue_precheck(rule, attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_rule_destroy_hws(rule, attr);
+
+ return ret;
+}
+
+int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr)
+{
+ int ret;
+
+ ret = hws_rule_enqueue_precheck_update(rule, attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_rule_create_hws(rule,
+ attr,
+ 0,
+ NULL,
+ at_idx,
+ rule_actions);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h
new file mode 100644
index 000000000000..495cdd17e9f3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_RULE_H_
+#define MLX5HWS_RULE_H_
+
+enum {
+ MLX5HWS_STE_CTRL_SZ = 20,
+ MLX5HWS_ACTIONS_SZ = 12,
+ MLX5HWS_MATCH_TAG_SZ = 32,
+ MLX5HWS_JUMBO_TAG_SZ = 44,
+};
+
+enum mlx5hws_rule_status {
+ MLX5HWS_RULE_STATUS_UNKNOWN,
+ MLX5HWS_RULE_STATUS_CREATING,
+ MLX5HWS_RULE_STATUS_CREATED,
+ MLX5HWS_RULE_STATUS_DELETING,
+ MLX5HWS_RULE_STATUS_DELETED,
+ MLX5HWS_RULE_STATUS_FAILING,
+ MLX5HWS_RULE_STATUS_FAILED,
+};
+
+enum mlx5hws_rule_move_state {
+ MLX5HWS_RULE_RESIZE_STATE_IDLE,
+ MLX5HWS_RULE_RESIZE_STATE_WRITING,
+ MLX5HWS_RULE_RESIZE_STATE_DELETING,
+};
+
+enum mlx5hws_rule_jumbo_match_tag_offset {
+ MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0 = 8,
+};
+
+struct mlx5hws_rule_match_tag {
+ union {
+ u8 jumbo[MLX5HWS_JUMBO_TAG_SZ];
+ struct {
+ u8 reserved[MLX5HWS_ACTIONS_SZ];
+ u8 match[MLX5HWS_MATCH_TAG_SZ];
+ };
+ };
+};
+
+struct mlx5hws_rule_resize_info {
+ struct mlx5hws_pool *action_ste_pool[2];
+ u32 rtc_0;
+ u32 rtc_1;
+ u32 rule_idx;
+ u8 state;
+ u8 max_stes;
+ u8 ctrl_seg[MLX5HWS_WQE_SZ_GTA_CTRL]; /* Ctrl segment of STE: 48 bytes */
+ u8 data_seg[MLX5HWS_WQE_SZ_GTA_DATA]; /* Data segment of STE: 64 bytes */
+};
+
+struct mlx5hws_rule {
+ struct mlx5hws_matcher *matcher;
+ union {
+ struct mlx5hws_rule_match_tag tag;
+ struct mlx5hws_rule_resize_info *resize_info;
+ };
+ u32 rtc_0; /* The RTC into which the STE was inserted */
+ u32 rtc_1; /* The RTC into which the STE was inserted */
+ int action_ste_idx; /* STE array index */
+ u8 status; /* enum mlx5hws_rule_status */
+ u8 action_ste_selector; /* For rule update - which action STE is in use */
+ u8 pending_wqes;
+ bool skip_delete; /* For complex rules - another rule with same tag
+ * still exists, so don't actually delete this rule.
+ */
+};
+
+void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule);
+
+int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
+ void *queue, void *user_data);
+
+int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr);
+
+bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule);
+
+void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule);
+
+#endif /* MLX5HWS_RULE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
new file mode 100644
index 000000000000..6d443e6ee8d9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
@@ -0,0 +1,1231 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+#include "lib/clock.h"
+
+enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
+
+struct mlx5hws_send_ring_dep_wqe *
+mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+ unsigned int idx = send_sq->head_dep_idx++ & (queue->num_entries - 1);
+
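+ /* num_entries is a power of two, so masking head_dep_idx wraps it
+ * around the ring; clear the match tag of the reused slot.
+ */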
+ memset(&send_sq->dep_wqe[idx].wqe_data.tag, 0, MLX5HWS_MATCH_TAG_SZ);
+
+ return &send_sq->dep_wqe[idx];
+}
+
+void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+ queue->send_ring.send_sq.head_dep_idx--;
+}
+
+void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+
+ /* Fence first from previous depend WQEs */
+ ste_attr.send_attr.fence = 1;
+
+ while (send_sq->head_dep_idx != send_sq->tail_dep_idx) {
+ dep_wqe = &send_sq->dep_wqe[send_sq->tail_dep_idx++ & (queue->num_entries - 1)];
+
+ /* Notify HW on the last WQE */
+ ste_attr.send_attr.notify_hw = (send_sq->tail_dep_idx == send_sq->head_dep_idx);
+ ste_attr.send_attr.user_data = dep_wqe->user_data;
+ ste_attr.send_attr.rule = dep_wqe->rule;
+
+ ste_attr.rtc_0 = dep_wqe->rtc_0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1;
+ ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
+ ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
+ ste_attr.used_id_rtc_0 = &dep_wqe->rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &dep_wqe->rule->rtc_1;
+ ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ ste_attr.wqe_data = &dep_wqe->wqe_data;
+ ste_attr.direct_index = dep_wqe->direct_index;
+
+ mlx5hws_send_ste(queue, &ste_attr);
+
+ /* Fencing is done only on the first WQE */
+ ste_attr.send_attr.fence = 0;
+ }
+}
+
+struct mlx5hws_send_engine_post_ctrl
+mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+
+ ctrl.queue = queue;
+ /* Currently only one send ring is supported */
+ ctrl.send_ring = &queue->send_ring;
+ ctrl.num_wqebbs = 0;
+
+ return ctrl;
+}
+
+void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ char **buf, size_t *len)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &ctrl->send_ring->send_sq;
+ unsigned int idx;
+
+ idx = (send_sq->cur_post + ctrl->num_wqebbs) & send_sq->buf_mask;
+
+ /* Note that *buf is a single MLX5_SEND_WQE_BB. It cannot be used
+ * as a buffer of more than one WQE_BB, since two MLX5_SEND_WQE_BBs
+ * can be on 2 different kernel memory pages.
+ */
+ *buf = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+ *len = MLX5_SEND_WQE_BB;
+
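+ /* The first WQEBB starts with the control segment, which is
+ * filled later by mlx5hws_send_engine_post_end(); skip past it
+ * in the returned buffer.
+ */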
+ if (!ctrl->num_wqebbs) {
+ *buf += sizeof(struct mlx5hws_wqe_ctrl_seg);
+ *len -= sizeof(struct mlx5hws_wqe_ctrl_seg);
+ }
+
+ ctrl->num_wqebbs++;
+}
+
+static void hws_send_engine_post_ring(struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_wqe_ctrl_seg *doorbell_cseg)
+{
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ *sq->wq.db = cpu_to_be32(sq->cur_post);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)doorbell_cseg, sq->uar_map);
+
+ /* Ensure doorbell is written on uar_page before poll_cq */
+ WRITE_ONCE(doorbell_cseg, NULL);
+}
+
+static void
+hws_send_wqe_set_tag(struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
+ struct mlx5hws_rule_match_tag *tag,
+ bool is_jumbo)
+{
+ if (is_jumbo) {
+ /* Clear previous possibly dirty control */
+ memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ);
+ memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+ } else {
+ /* Clear previous possibly dirty control and actions */
+ memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ + MLX5HWS_ACTIONS_SZ);
+ memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
+ }
+}
+
+void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ struct mlx5hws_send_engine_post_attr *attr)
+{
+ struct mlx5hws_wqe_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_send_ring_sq *sq;
+ unsigned int idx;
+ u32 flags = 0;
+
+ sq = &ctrl->send_ring->send_sq;
+ idx = sq->cur_post & sq->buf_mask;
+ sq->last_idx = idx;
+
+ wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, idx);
+
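+ /* Build the control segment: opcode and opmod, the producer
+ * index, the WQE size in 16-byte units and the SQ number.
+ */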
+ wqe_ctrl->opmod_idx_opcode =
+ cpu_to_be32((attr->opmod << 24) |
+ ((sq->cur_post & 0xffff) << 8) |
+ attr->opcode);
+ wqe_ctrl->qpn_ds =
+ cpu_to_be32((attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16 |
+ sq->sqn << 8);
+ wqe_ctrl->imm = cpu_to_be32(attr->id);
+
+ flags |= attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ flags |= attr->fence ? MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE : 0;
+ wqe_ctrl->flags = cpu_to_be32(flags);
+
+ sq->wr_priv[idx].id = attr->id;
+ sq->wr_priv[idx].retry_id = attr->retry_id;
+
+ sq->wr_priv[idx].rule = attr->rule;
+ sq->wr_priv[idx].user_data = attr->user_data;
+ sq->wr_priv[idx].num_wqebbs = ctrl->num_wqebbs;
+
+ if (attr->rule) {
+ sq->wr_priv[idx].rule->pending_wqes++;
+ sq->wr_priv[idx].used_id = attr->used_id;
+ }
+
+ sq->cur_post += ctrl->num_wqebbs;
+
+ if (attr->notify_hw)
+ hws_send_engine_post_ring(sq, wqe_ctrl);
+}
+
+static void hws_send_wqe(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_engine_post_attr *send_attr,
+ struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
+ void *send_wqe_data,
+ void *send_wqe_tag,
+ bool is_jumbo,
+ u8 gta_opcode,
+ u32 direct_index)
+{
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ size_t wqe_len;
+
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);
+
+ wqe_ctrl->op_dirix = cpu_to_be32(gta_opcode << 28 | direct_index);
+ memcpy(wqe_ctrl->stc_ix, send_wqe_ctrl->stc_ix,
+ sizeof(send_wqe_ctrl->stc_ix));
+
+ if (send_wqe_data)
+ memcpy(wqe_data, send_wqe_data, sizeof(*wqe_data));
+ else
+ hws_send_wqe_set_tag(wqe_data, send_wqe_tag, is_jumbo);
+
+ mlx5hws_send_engine_post_end(&ctrl, send_attr);
+}
+
+void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr;
+ u8 notify_hw = send_attr->notify_hw;
+ u8 fence = send_attr->fence;
+
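+ /* An STE may be written to two RTCs (e.g. RX and TX). Only the
+ * first posted WQE carries the fence and only the last one
+ * notifies HW.
+ */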
+ if (ste_attr->rtc_1) {
+ send_attr->id = ste_attr->rtc_1;
+ send_attr->used_id = ste_attr->used_id_rtc_1;
+ send_attr->retry_id = ste_attr->retry_rtc_1;
+ send_attr->fence = fence;
+ send_attr->notify_hw = notify_hw && !ste_attr->rtc_0;
+ hws_send_wqe(queue, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode,
+ ste_attr->direct_index);
+ }
+
+ if (ste_attr->rtc_0) {
+ send_attr->id = ste_attr->rtc_0;
+ send_attr->used_id = ste_attr->used_id_rtc_0;
+ send_attr->retry_id = ste_attr->retry_rtc_0;
+ send_attr->fence = fence && !ste_attr->rtc_1;
+ send_attr->notify_hw = notify_hw;
+ hws_send_wqe(queue, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode,
+ ste_attr->direct_index);
+ }
+
+ /* Restore to original requested values */
+ send_attr->notify_hw = notify_hw;
+ send_attr->fence = fence;
+}
+
+static void hws_send_engine_retry_post_send(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ u16 wqe_cnt)
+{
+ struct mlx5hws_send_engine_post_attr send_attr = {0};
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ struct mlx5hws_send_ring_sq *send_sq;
+ unsigned int idx;
+ size_t wqe_len;
+ char *p;
+
+ send_attr.rule = priv->rule;
+ send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ send_attr.len = MLX5_SEND_WQE_BB * 2 - sizeof(struct mlx5hws_wqe_ctrl_seg);
+ send_attr.notify_hw = 1;
+ send_attr.fence = 0;
+ send_attr.user_data = priv->user_data;
+ send_attr.id = priv->retry_id;
+ send_attr.used_id = priv->used_id;
+
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);
+
+ send_sq = &ctrl.send_ring->send_sq;
+ idx = wqe_cnt & send_sq->buf_mask;
+ p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+
+ /* Copy old gta ctrl */
+ memcpy(wqe_ctrl, p + sizeof(struct mlx5hws_wqe_ctrl_seg),
+ MLX5_SEND_WQE_BB - sizeof(struct mlx5hws_wqe_ctrl_seg));
+
+ idx = (wqe_cnt + 1) & send_sq->buf_mask;
+ p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+
+ /* Copy old gta data */
+ memcpy(wqe_data, p, MLX5_SEND_WQE_BB);
+
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+}
+
+void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *sq = &queue->send_ring.send_sq;
+ struct mlx5hws_wqe_ctrl_seg *wqe_ctrl;
+
+ wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, sq->last_idx);
+ wqe_ctrl->flags |= cpu_to_be32(MLX5_WQE_CTRL_CQ_UPDATE);
+
+ hws_send_engine_post_ring(sq, wqe_ctrl);
+}
+
+static void
+hws_send_engine_update_rule_resize(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ enum mlx5hws_flow_op_status *status)
+{
+ switch (priv->rule->resize_info->state) {
+ case MLX5HWS_RULE_RESIZE_STATE_WRITING:
+ if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
+ /* Backup original RTCs */
+ u32 orig_rtc_0 = priv->rule->resize_info->rtc_0;
+ u32 orig_rtc_1 = priv->rule->resize_info->rtc_1;
+
+ /* Delete partially failed move rule using resize_info */
+ priv->rule->resize_info->rtc_0 = priv->rule->rtc_0;
+ priv->rule->resize_info->rtc_1 = priv->rule->rtc_1;
+
+ /* Move rule to original RTC for future delete */
+ priv->rule->rtc_0 = orig_rtc_0;
+ priv->rule->rtc_1 = orig_rtc_1;
+ }
+ /* Clean leftovers */
+ mlx5hws_rule_move_hws_remove(priv->rule, queue, priv->user_data);
+ break;
+
+ case MLX5HWS_RULE_RESIZE_STATE_DELETING:
+ if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
+ *status = MLX5HWS_FLOW_OP_ERROR;
+ } else {
+ *status = MLX5HWS_FLOW_OP_SUCCESS;
+ priv->rule->matcher = priv->rule->matcher->resize_dst;
+ }
+ priv->rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_IDLE;
+ priv->rule->status = MLX5HWS_RULE_STATUS_CREATED;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ u16 wqe_cnt,
+ enum mlx5hws_flow_op_status *status)
+{
+ priv->rule->pending_wqes--;
+
+ if (*status == MLX5HWS_FLOW_OP_ERROR) {
+ if (priv->retry_id) {
+ hws_send_engine_retry_post_send(queue, priv, wqe_cnt);
+ return;
+ }
+ /* Some part of the rule failed */
+ priv->rule->status = MLX5HWS_RULE_STATUS_FAILING;
+ *priv->used_id = 0;
+ } else {
+ *priv->used_id = priv->id;
+ }
+
+ /* Update rule status for the last completion */
+ if (!priv->rule->pending_wqes) {
+ if (unlikely(mlx5hws_rule_move_in_progress(priv->rule))) {
+ hws_send_engine_update_rule_resize(queue, priv, status);
+ return;
+ }
+
+ if (unlikely(priv->rule->status == MLX5HWS_RULE_STATUS_FAILING)) {
+ /* Rule completely failed and doesn't require cleanup */
+ if (!priv->rule->rtc_0 && !priv->rule->rtc_1)
+ priv->rule->status = MLX5HWS_RULE_STATUS_FAILED;
+
+ *status = MLX5HWS_FLOW_OP_ERROR;
+ } else {
+ /* Increase the status. This only works on a good flow, as the
+ * enum is arranged in order:
+ * creating -> created -> deleting -> deleted
+ */
+ priv->rule->status++;
+ *status = MLX5HWS_FLOW_OP_SUCCESS;
+ /* Rule was deleted now we can safely release action STEs
+ * and clear resize info
+ */
+ if (priv->rule->status == MLX5HWS_RULE_STATUS_DELETED) {
+ mlx5hws_rule_free_action_ste(priv->rule);
+ mlx5hws_rule_clear_resize_info(priv->rule);
+ }
+ }
+ }
+}
+
+static void hws_send_engine_update(struct mlx5hws_send_engine *queue,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5hws_send_ring_priv *priv,
+ struct mlx5hws_flow_op_result res[],
+ s64 *i,
+ u32 res_nb,
+ u16 wqe_cnt)
+{
+ enum mlx5hws_flow_op_status status;
+
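+ /* A NULL CQE means the WQE completed implicitly as part of a
+ * later completion; otherwise the opcode and the top bit of
+ * byte_cnt indicate whether the WQE failed.
+ */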
+ if (!cqe || (likely(be32_to_cpu(cqe->byte_cnt) >> 31 == 0) &&
+ likely(get_cqe_opcode(cqe) == MLX5_CQE_REQ))) {
+ status = MLX5HWS_FLOW_OP_SUCCESS;
+ } else {
+ status = MLX5HWS_FLOW_OP_ERROR;
+ }
+
+ if (priv->user_data) {
+ if (priv->rule) {
+ hws_send_engine_update_rule(queue, priv, wqe_cnt, &status);
+ /* Completion is provided on the last rule WQE */
+ if (priv->rule->pending_wqes)
+ return;
+ }
+
+ if (*i < res_nb) {
+ res[*i].user_data = priv->user_data;
+ res[*i].status = status;
+ (*i)++;
+ mlx5hws_send_engine_dec_rule(queue);
+ } else {
+ mlx5hws_send_engine_gen_comp(queue, priv->user_data, status);
+ }
+ }
+}
+
+static int mlx5hws_parse_cqe(struct mlx5hws_send_ring_cq *cq,
+ struct mlx5_cqe64 *cqe64)
+{
+ if (unlikely(get_cqe_opcode(cqe64) != MLX5_CQE_REQ)) {
+ struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe64;
+
+ mlx5_core_err(cq->mdev, "Bad OP in HWS SQ CQE: 0x%x\n", get_cqe_opcode(cqe64));
+ mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n", err_cqe->vendor_err_synd);
+ mlx5_core_err(cq->mdev, "syndrome=%x\n", err_cqe->syndrome);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
+ 16, 1, err_cqe,
+ sizeof(*err_cqe), false);
+ return CQ_POLL_ERR;
+ }
+
+ return CQ_OK;
+}
+
+static int mlx5hws_cq_poll_one(struct mlx5hws_send_ring_cq *cq)
+{
+ struct mlx5_cqe64 *cqe64;
+ int err;
+
+ cqe64 = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe64) {
+ if (unlikely(cq->mdev->state ==
+ MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
+ mlx5_core_dbg_once(cq->mdev,
+ "Polling CQ while device is shutting down\n");
+ return CQ_POLL_ERR;
+ }
+ return CQ_EMPTY;
+ }
+
+ mlx5_cqwq_pop(&cq->wq);
+ err = mlx5hws_parse_cqe(cq, cqe64);
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ return err;
+}
+
+static void hws_send_engine_poll_cq(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_flow_op_result res[],
+ s64 *polled,
+ u32 res_nb)
+{
+ struct mlx5hws_send_ring *send_ring = &queue->send_ring;
+ struct mlx5hws_send_ring_cq *cq = &send_ring->send_cq;
+ struct mlx5hws_send_ring_sq *sq = &send_ring->send_sq;
+ struct mlx5hws_send_ring_priv *priv;
+ struct mlx5_cqe64 *cqe;
+ u8 cqe_opcode;
+ u16 wqe_cnt;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe)
+ return;
+
+ cqe_opcode = get_cqe_opcode(cqe);
+ if (cqe_opcode == MLX5_CQE_INVALID)
+ return;
+
+ if (unlikely(cqe_opcode != MLX5_CQE_REQ))
+ queue->err = true;
+
+ wqe_cnt = be16_to_cpu(cqe->wqe_counter) & sq->buf_mask;
+
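+ /* Completions may be coalesced: first retire all WQEs posted
+ * before the one this CQE reports, then handle the reported WQE
+ * itself.
+ */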
+ while (cq->poll_wqe != wqe_cnt) {
+ priv = &sq->wr_priv[cq->poll_wqe];
+ hws_send_engine_update(queue, NULL, priv, res, polled, res_nb, 0);
+ cq->poll_wqe = (cq->poll_wqe + priv->num_wqebbs) & sq->buf_mask;
+ }
+
+ priv = &sq->wr_priv[wqe_cnt];
+ cq->poll_wqe = (wqe_cnt + priv->num_wqebbs) & sq->buf_mask;
+ hws_send_engine_update(queue, cqe, priv, res, polled, res_nb, wqe_cnt);
+ mlx5hws_cq_poll_one(cq);
+}
+
+static void hws_send_engine_poll_list(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_flow_op_result res[],
+ s64 *polled,
+ u32 res_nb)
+{
+ struct mlx5hws_completed_poll *comp = &queue->completed;
+
+ while (comp->ci != comp->pi) {
+ if (*polled < res_nb) {
+ res[*polled].status =
+ comp->entries[comp->ci].status;
+ res[*polled].user_data =
+ comp->entries[comp->ci].user_data;
+ (*polled)++;
+ comp->ci = (comp->ci + 1) & comp->mask;
+ mlx5hws_send_engine_dec_rule(queue);
+ } else {
+ return;
+ }
+ }
+}
+
+static int hws_send_engine_poll(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_flow_op_result res[],
+ u32 res_nb)
+{
+ s64 polled = 0;
+
+ hws_send_engine_poll_list(queue, res, &polled, res_nb);
+
+ if (polled >= res_nb)
+ return polled;
+
+ hws_send_engine_poll_cq(queue, res, &polled, res_nb);
+
+ return polled;
+}
+
+int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ struct mlx5hws_flow_op_result res[],
+ u32 res_nb)
+{
+ return hws_send_engine_poll(&ctx->send_queue[queue_id], res, res_nb);
+}
+
+static int hws_send_ring_alloc_sq(struct mlx5_core_dev *mdev,
+ int numa_node,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ void *sqc_data)
+{
+ void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5_wq_param param;
+ size_t buf_sz;
+ int err;
+
+ sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->mdev = mdev;
+
+ param.db_numa_node = numa_node;
+ param.buf_numa_node = numa_node;
+ err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
+ if (err)
+ return err;
+ wq->db = &wq->db[MLX5_SND_DBR];
+
+ buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
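+ /* One dependent WQE slot per queue entry is enough, while wr_priv
+ * must cover every possible WQEBB position in the SQ.
+ */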
+ sq->dep_wqe = kcalloc(queue->num_entries, sizeof(*sq->dep_wqe), GFP_KERNEL);
+ if (!sq->dep_wqe) {
+ err = -ENOMEM;
+ goto destroy_wq_cyc;
+ }
+
+ sq->wr_priv = kzalloc(sizeof(*sq->wr_priv) * buf_sz, GFP_KERNEL);
+ if (!sq->wr_priv) {
+ err = -ENOMEM;
+ goto free_dep_wqe;
+ }
+
+ sq->buf_mask = (queue->num_entries * MAX_WQES_PER_RULE) - 1;
+
+ return 0;
+
+free_dep_wqe:
+ kfree(sq->dep_wqe);
+destroy_wq_cyc:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+ return err;
+}
+
+static void hws_send_ring_free_sq(struct mlx5hws_send_ring_sq *sq)
+{
+ if (!sq)
+ return;
+ kfree(sq->wr_priv);
+ kfree(sq->dep_wqe);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
+ void *sqc_data,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ void *in, *sqc, *wq;
+ int inlen, err;
+ u8 ts_format;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+ MLX5_SET(sqc, sqc, cqn, cq->mcq.cqn);
+
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ ts_format = mlx5_is_real_time_sq(mdev) ? MLX5_TIMESTAMP_FORMAT_REAL_TIME :
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+ MLX5_SET(sqc, sqc, ts_format, ts_format);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void hws_send_ring_destroy_sq(struct mlx5_core_dev *mdev,
+ struct mlx5hws_send_ring_sq *sq)
+{
+ mlx5_core_destroy_sq(mdev, sq->sqn);
+}
+
+static int hws_send_ring_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+ void *in, *sqc;
+ int inlen, err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ err = mlx5_core_modify_sq(mdev, sqn, in);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void hws_send_ring_close_sq(struct mlx5hws_send_ring_sq *sq)
+{
+ mlx5_core_destroy_sq(sq->mdev, sq->sqn);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+ kfree(sq->wr_priv);
+ kfree(sq->dep_wqe);
+}
+
+static int hws_send_ring_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
+ void *sqc_data,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ int err;
+
+ err = hws_send_ring_create_sq(mdev, pdn, sqc_data, queue, sq, cq);
+ if (err)
+ return err;
+
+ err = hws_send_ring_set_sq_rdy(mdev, sq->sqn);
+ if (err)
+ hws_send_ring_destroy_sq(mdev, sq);
+
+ return err;
+}
+
+static int hws_send_ring_open_sq(struct mlx5hws_context *ctx,
+ int numa_node,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ size_t buf_sz, sq_log_buf_sz;
+ void *sqc_data, *wq;
+ int err;
+
+ sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
+ if (!sqc_data)
+ return -ENOMEM;
+
+ buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
+ sq_log_buf_sz = ilog2(roundup_pow_of_two(buf_sz));
+
+ wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, ctx->pd_num);
+ MLX5_SET(wq, wq, log_wq_sz, sq_log_buf_sz);
+
+ err = hws_send_ring_alloc_sq(ctx->mdev, numa_node, queue, sq, sqc_data);
+ if (err)
+ goto err_free_sqc;
+
+ err = hws_send_ring_create_sq_rdy(ctx->mdev, ctx->pd_num, sqc_data,
+ queue, sq, cq);
+ if (err)
+ goto err_free_sq;
+
+ kvfree(sqc_data);
+
+ return 0;
+err_free_sq:
+ hws_send_ring_free_sq(sq);
+err_free_sqc:
+ kvfree(sqc_data);
+ return err;
+}
+
+static void hws_cq_complete(struct mlx5_core_cq *mcq,
+ struct mlx5_eqe *eqe)
+{
+ pr_err("CQ completion CQ: #%u\n", mcq->cqn);
+}
+
+static int hws_send_ring_alloc_cq(struct mlx5_core_dev *mdev,
+ int numa_node,
+ struct mlx5hws_send_engine *queue,
+ void *cqc_data,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ struct mlx5_wq_param param;
+ struct mlx5_cqe64 *cqe;
+ int err;
+ u32 i;
+
+ param.buf_numa_node = numa_node;
+ param.db_numa_node = numa_node;
+
+ err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+ mcq->comp = hws_cq_complete;
+
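+ /* Init all CQEs to an invalid opcode with the ownership bit set,
+ * so a stale entry is never mistaken for a new completion.
+ */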
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+ cqe->op_own = 0xf1;
+ }
+
+ cq->mdev = mdev;
+
+ return 0;
+}
+
+static int hws_send_ring_create_cq(struct mlx5_core_dev *mdev,
+ struct mlx5hws_send_engine *queue,
+ void *cqc_data,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ void *in, *cqc;
+ int inlen, eqn;
+ int err;
+
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
+ if (err)
+ return err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
+
+ kvfree(in);
+
+ return err;
+}
+
+static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev,
+ struct mlx5hws_send_engine *queue,
+ int numa_node,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ void *cqc_data;
+ int err;
+
+ cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
+ if (!cqc_data)
+ return -ENOMEM;
+
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc_data, cqe_sz, queue->num_entries);
+ MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries));
+
+ err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq);
+ if (err)
+ goto err_out;
+
+ err = hws_send_ring_create_cq(mdev, queue, cqc_data, cq);
+ if (err)
+ goto err_free_cq;
+
+ kvfree(cqc_data);
+
+ return 0;
+
+err_free_cq:
+ mlx5_wq_destroy(&cq->wq_ctrl);
+err_out:
+ kvfree(cqc_data);
+ return err;
+}
+
+static void hws_send_ring_close_cq(struct mlx5hws_send_ring_cq *cq)
+{
+ mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static void hws_send_ring_close(struct mlx5hws_send_engine *queue)
+{
+ hws_send_ring_close_sq(&queue->send_ring.send_sq);
+ hws_send_ring_close_cq(&queue->send_ring.send_cq);
+}
+
+static int mlx5hws_send_ring_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue)
+{
+ int numa_node = dev_to_node(mlx5_core_dma_dev(ctx->mdev));
+ struct mlx5hws_send_ring *ring = &queue->send_ring;
+ int err;
+
+ err = hws_send_ring_open_cq(ctx->mdev, queue, numa_node, &ring->send_cq);
+ if (err)
+ return err;
+
+ err = hws_send_ring_open_sq(ctx, numa_node, queue, &ring->send_sq,
+ &ring->send_cq);
+ if (err)
+ goto close_cq;
+
+ return err;
+
+close_cq:
+ hws_send_ring_close_cq(&ring->send_cq);
+ return err;
+}
+
+void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
+{
+ hws_send_ring_close(queue);
+ kfree(queue->completed.entries);
+}
+
+int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ u16 queue_size)
+{
+ int err;
+
+ mutex_init(&queue->lock);
+
+ queue->num_entries = roundup_pow_of_two(queue_size);
+ queue->used_entries = 0;
+
+ queue->completed.entries = kcalloc(queue->num_entries,
+ sizeof(queue->completed.entries[0]),
+ GFP_KERNEL);
+ if (!queue->completed.entries)
+ return -ENOMEM;
+
+ queue->completed.pi = 0;
+ queue->completed.ci = 0;
+ queue->completed.mask = queue->num_entries - 1;
+ err = mlx5hws_send_ring_open(ctx, queue);
+ if (err)
+ goto free_completed_entries;
+
+ return 0;
+
+free_completed_entries:
+ kfree(queue->completed.entries);
+ return err;
+}
+
+static void __hws_send_queues_close(struct mlx5hws_context *ctx, u16 queues)
+{
+ while (queues--)
+ mlx5hws_send_queue_close(&ctx->send_queue[queues]);
+}
+
+static void hws_send_queues_bwc_locks_destroy(struct mlx5hws_context *ctx)
+{
+ int bwc_queues = mlx5hws_bwc_queues(ctx);
+ int i;
+
+ if (!mlx5hws_context_bwc_supported(ctx))
+ return;
+
+ for (i = 0; i < bwc_queues; i++) {
+ mutex_destroy(&ctx->bwc_send_queue_locks[i]);
+ lockdep_unregister_key(ctx->bwc_lock_class_keys + i);
+ }
+
+ kfree(ctx->bwc_lock_class_keys);
+ kfree(ctx->bwc_send_queue_locks);
+}
+
+void mlx5hws_send_queues_close(struct mlx5hws_context *ctx)
+{
+ hws_send_queues_bwc_locks_destroy(ctx);
+ __hws_send_queues_close(ctx, ctx->queues);
+ kfree(ctx->send_queue);
+}
+
+static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
+{
+ /* Number of BWC queues is equal to the number of the usual HWS queues */
+ int bwc_queues = ctx->queues - 1;
+ int i;
+
+ if (!mlx5hws_context_bwc_supported(ctx))
+ return 0;
+
+ ctx->queues += bwc_queues;
+
+ ctx->bwc_send_queue_locks = kcalloc(bwc_queues,
+ sizeof(*ctx->bwc_send_queue_locks),
+ GFP_KERNEL);
+
+ if (!ctx->bwc_send_queue_locks)
+ return -ENOMEM;
+
+ ctx->bwc_lock_class_keys = kcalloc(bwc_queues,
+ sizeof(*ctx->bwc_lock_class_keys),
+ GFP_KERNEL);
+ if (!ctx->bwc_lock_class_keys)
+ goto err_lock_class_keys;
+
+ for (i = 0; i < bwc_queues; i++) {
+ mutex_init(&ctx->bwc_send_queue_locks[i]);
+ lockdep_register_key(ctx->bwc_lock_class_keys + i);
+ }
+
+ return 0;
+
+err_lock_class_keys:
+ kfree(ctx->bwc_send_queue_locks);
+ return -ENOMEM;
+}
+
+int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
+ u16 queues,
+ u16 queue_size)
+{
+ int err = 0;
+ u32 i;
+
+ /* Open one extra queue for control path */
+ ctx->queues = queues + 1;
+
+ /* open a separate set of queues and locks for bwc API */
+ err = hws_bwc_send_queues_init(ctx);
+ if (err)
+ return err;
+
+ ctx->send_queue = kcalloc(ctx->queues, sizeof(*ctx->send_queue), GFP_KERNEL);
+ if (!ctx->send_queue) {
+ err = -ENOMEM;
+ goto free_bwc_locks;
+ }
+
+ for (i = 0; i < ctx->queues; i++) {
+ err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size);
+ if (err)
+ goto close_send_queues;
+ }
+
+ return 0;
+
+close_send_queues:
+ __hws_send_queues_close(ctx, i);
+
+ kfree(ctx->send_queue);
+
+free_bwc_locks:
+ hws_send_queues_bwc_locks_destroy(ctx);
+
+ return err;
+}
+
+int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 actions)
+{
+ struct mlx5hws_send_ring_sq *send_sq;
+ struct mlx5hws_send_engine *queue;
+ bool wait_comp = false;
+ s64 polled = 0;
+
+ queue = &ctx->send_queue[queue_id];
+ send_sq = &queue->send_ring.send_sq;
+
+ switch (actions) {
+ case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC:
+ wait_comp = true;
+ fallthrough;
+ case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC:
+ if (send_sq->head_dep_idx != send_sq->tail_dep_idx)
+ /* Send dependent WQEs to drain the queue */
+ mlx5hws_send_all_dep_wqe(queue);
+ else
+ /* Signal on the last posted WQE */
+ mlx5hws_send_engine_flush_queue(queue);
+
+ /* Poll queue until empty */
+ while (wait_comp && !mlx5hws_send_engine_empty(queue))
+ hws_send_engine_poll_cq(queue, NULL, &polled, 0);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+hws_send_wqe_fw(struct mlx5_core_dev *mdev,
+ u32 pd_num,
+ struct mlx5hws_send_engine_post_attr *send_attr,
+ struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
+ void *send_wqe_match_data,
+ void *send_wqe_match_tag,
+ void *send_wqe_range_data,
+ void *send_wqe_range_tag,
+ bool is_jumbo,
+ u8 gta_opcode)
+{
+ bool has_range = send_wqe_range_data || send_wqe_range_tag;
+ bool has_match = send_wqe_match_data || send_wqe_match_tag;
+ struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data0 = {0};
+ struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data1 = {0};
+ struct mlx5hws_wqe_gta_ctrl_seg gta_wqe_ctrl = {0};
+ struct mlx5hws_cmd_generate_wqe_attr attr = {0};
+ struct mlx5hws_wqe_ctrl_seg wqe_ctrl = {0};
+ struct mlx5_cqe64 cqe;
+ u32 flags = 0;
+ int ret;
+
+ /* Set WQE control */
+ wqe_ctrl.opmod_idx_opcode = cpu_to_be32((send_attr->opmod << 24) | send_attr->opcode);
+ wqe_ctrl.qpn_ds = cpu_to_be32((send_attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16);
+ flags |= send_attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ wqe_ctrl.flags = cpu_to_be32(flags);
+ wqe_ctrl.imm = cpu_to_be32(send_attr->id);
+
+ /* Set GTA WQE CTRL */
+ memcpy(gta_wqe_ctrl.stc_ix, send_wqe_ctrl->stc_ix, sizeof(send_wqe_ctrl->stc_ix));
+ gta_wqe_ctrl.op_dirix = cpu_to_be32(gta_opcode << 28);
+
+ /* Set GTA match WQE DATA */
+ if (has_match) {
+ if (send_wqe_match_data)
+ memcpy(&gta_wqe_data0, send_wqe_match_data, sizeof(gta_wqe_data0));
+ else
+ hws_send_wqe_set_tag(&gta_wqe_data0, send_wqe_match_tag, is_jumbo);
+
+ gta_wqe_data0.rsvd1_definer = cpu_to_be32(send_attr->match_definer_id << 8);
+ attr.gta_data_0 = (u8 *)&gta_wqe_data0;
+ }
+
+ /* Set GTA range WQE DATA */
+ if (has_range) {
+ if (send_wqe_range_data)
+ memcpy(&gta_wqe_data1, send_wqe_range_data, sizeof(gta_wqe_data1));
+ else
+ hws_send_wqe_set_tag(&gta_wqe_data1, send_wqe_range_tag, false);
+
+ gta_wqe_data1.rsvd1_definer = cpu_to_be32(send_attr->range_definer_id << 8);
+ attr.gta_data_1 = (u8 *)&gta_wqe_data1;
+ }
+
+ attr.pdn = pd_num;
+ attr.wqe_ctrl = (u8 *)&wqe_ctrl;
+ attr.gta_ctrl = (u8 *)&gta_wqe_ctrl;
+
+send_wqe:
+ ret = mlx5hws_cmd_generate_wqe(mdev, &attr, &cqe);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to write WQE using command");
+ return ret;
+ }
+
+ if ((get_cqe_opcode(&cqe) == MLX5_CQE_REQ) &&
+ (be32_to_cpu(cqe.byte_cnt) >> 31 == 0)) {
+ *send_attr->used_id = send_attr->id;
+ return 0;
+ }
+
+ /* Retry if rule failed */
+ if (send_attr->retry_id) {
+ wqe_ctrl.imm = cpu_to_be32(send_attr->retry_id);
+ send_attr->id = send_attr->retry_id;
+ send_attr->retry_id = 0;
+ goto send_wqe;
+ }
+
+ return -1;
+}
+
+void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr;
+ struct mlx5hws_rule *rule = send_attr->rule;
+ struct mlx5_core_dev *mdev;
+ u16 queue_id;
+ u32 pdn;
+ int ret;
+
+ queue_id = queue - ctx->send_queue;
+ mdev = ctx->mdev;
+ pdn = ctx->pd_num;
+
+ /* Writing through FW can't use the HW fence, therefore we drain the queue */
+ if (send_attr->fence)
+ mlx5hws_send_queue_action(ctx,
+ queue_id,
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+
+ if (ste_attr->rtc_1) {
+ send_attr->id = ste_attr->rtc_1;
+ send_attr->used_id = ste_attr->used_id_rtc_1;
+ send_attr->retry_id = ste_attr->retry_rtc_1;
+ ret = hws_send_wqe_fw(mdev, pdn, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->range_wqe_data,
+ ste_attr->range_wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode);
+ if (ret)
+ goto fail_rule;
+ }
+
+ if (ste_attr->rtc_0) {
+ send_attr->id = ste_attr->rtc_0;
+ send_attr->used_id = ste_attr->used_id_rtc_0;
+ send_attr->retry_id = ste_attr->retry_rtc_0;
+ ret = hws_send_wqe_fw(mdev, pdn, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->range_wqe_data,
+ ste_attr->range_wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode);
+ if (ret)
+ goto fail_rule;
+ }
+
+ /* Increase the status. This only works on a good flow, as the
+ * enum is arranged in order:
+ * creating -> created -> deleting -> deleted
+ */
+ if (likely(rule))
+ rule->status++;
+
+ mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_SUCCESS);
+
+ return;
+
+fail_rule:
+ if (likely(rule))
+ rule->status = !rule->rtc_0 && !rule->rtc_1 ?
+ MLX5HWS_RULE_STATUS_FAILED : MLX5HWS_RULE_STATUS_FAILING;
+
+ mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_ERROR);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h
new file mode 100644
index 000000000000..b50825d6dc53
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_SEND_H_
+#define MLX5HWS_SEND_H_
+
+/* A single operation requires at least two WQEBBs, which means
+ * a maximum of 16 such operations per rule.
+ */
+#define MAX_WQES_PER_RULE 32
+
+enum mlx5hws_wqe_opcode {
+ MLX5HWS_WQE_OPCODE_TBL_ACCESS = 0x2c,
+};
+
+enum mlx5hws_wqe_opmod {
+ MLX5HWS_WQE_OPMOD_GTA_STE = 0,
+ MLX5HWS_WQE_OPMOD_GTA_MOD_ARG = 1,
+};
+
+enum mlx5hws_wqe_gta_opcode {
+ MLX5HWS_WQE_GTA_OP_ACTIVATE = 0,
+ MLX5HWS_WQE_GTA_OP_DEACTIVATE = 1,
+};
+
+enum mlx5hws_wqe_gta_opmod {
+ MLX5HWS_WQE_GTA_OPMOD_STE = 0,
+ MLX5HWS_WQE_GTA_OPMOD_MOD_ARG = 1,
+};
+
+enum mlx5hws_wqe_gta_sz {
+ MLX5HWS_WQE_SZ_GTA_CTRL = 48,
+ MLX5HWS_WQE_SZ_GTA_DATA = 64,
+};
+
+/* WQE Control segment. */
+struct mlx5hws_wqe_ctrl_seg {
+ __be32 opmod_idx_opcode;
+ __be32 qpn_ds;
+ __be32 flags;
+ __be32 imm;
+};
+
+struct mlx5hws_wqe_gta_ctrl_seg {
+ __be32 op_dirix;
+ __be32 stc_ix[5];
+ __be32 rsvd0[6];
+};
+
+struct mlx5hws_wqe_gta_data_seg_ste {
+ __be32 rsvd0_ctr_id;
+ __be32 rsvd1_definer;
+ __be32 rsvd2[3];
+ union {
+ struct {
+ __be32 action[3];
+ __be32 tag[8];
+ };
+ __be32 jumbo[11];
+ };
+};
+
+struct mlx5hws_wqe_gta_data_seg_arg {
+ __be32 action_args[8];
+};
+
+struct mlx5hws_wqe_gta {
+ struct mlx5hws_wqe_gta_ctrl_seg gta_ctrl;
+ union {
+ struct mlx5hws_wqe_gta_data_seg_ste seg_ste;
+ struct mlx5hws_wqe_gta_data_seg_arg seg_arg;
+ };
+};
+
+struct mlx5hws_send_ring_cq {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_cqwq wq;
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5_core_cq mcq;
+ u16 poll_wqe;
+};
+
+struct mlx5hws_send_ring_priv {
+ struct mlx5hws_rule *rule;
+ void *user_data;
+ u32 num_wqebbs;
+ u32 id;
+ u32 retry_id;
+ u32 *used_id;
+};
+
+struct mlx5hws_send_ring_dep_wqe {
+ struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl;
+ struct mlx5hws_wqe_gta_data_seg_ste wqe_data;
+ struct mlx5hws_rule *rule;
+ u32 rtc_0;
+ u32 rtc_1;
+ u32 retry_rtc_0;
+ u32 retry_rtc_1;
+ u32 direct_index;
+ void *user_data;
+};
+
+struct mlx5hws_send_ring_sq {
+ struct mlx5_core_dev *mdev;
+ u16 cur_post;
+ u16 buf_mask;
+ struct mlx5hws_send_ring_priv *wr_priv;
+ unsigned int last_idx;
+ struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+ unsigned int head_dep_idx;
+ unsigned int tail_dep_idx;
+ u32 sqn;
+ struct mlx5_wq_cyc wq;
+ struct mlx5_wq_ctrl wq_ctrl;
+ void __iomem *uar_map;
+};
+
+struct mlx5hws_send_ring {
+ struct mlx5hws_send_ring_cq send_cq;
+ struct mlx5hws_send_ring_sq send_sq;
+};
+
+struct mlx5hws_completed_poll_entry {
+ void *user_data;
+ enum mlx5hws_flow_op_status status;
+};
+
+struct mlx5hws_completed_poll {
+ struct mlx5hws_completed_poll_entry *entries;
+ u16 ci;
+ u16 pi;
+ u16 mask;
+};
+
+struct mlx5hws_send_engine {
+ struct mlx5hws_send_ring send_ring;
+ struct mlx5_uars_page *uar; /* UAR is shared between rings of a queue */
+ struct mlx5hws_completed_poll completed;
+ u16 used_entries;
+ u16 num_entries;
+ bool err;
+ struct mutex lock; /* Protects the send engine */
+};
+
+struct mlx5hws_send_engine_post_ctrl {
+ struct mlx5hws_send_engine *queue;
+ struct mlx5hws_send_ring *send_ring;
+ size_t num_wqebbs;
+};
+
+struct mlx5hws_send_engine_post_attr {
+ u8 opcode;
+ u8 opmod;
+ u8 notify_hw;
+ u8 fence;
+ u8 match_definer_id;
+ u8 range_definer_id;
+ size_t len;
+ struct mlx5hws_rule *rule;
+ u32 id;
+ u32 retry_id;
+ u32 *used_id;
+ void *user_data;
+};
+
+struct mlx5hws_send_ste_attr {
+ u32 rtc_0;
+ u32 rtc_1;
+ u32 retry_rtc_0;
+ u32 retry_rtc_1;
+ u32 *used_id_rtc_0;
+ u32 *used_id_rtc_1;
+ bool wqe_tag_is_jumbo;
+ u8 gta_opcode;
+ u32 direct_index;
+ struct mlx5hws_send_engine_post_attr send_attr;
+ struct mlx5hws_rule_match_tag *wqe_tag;
+ struct mlx5hws_rule_match_tag *range_wqe_tag;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+ struct mlx5hws_wqe_gta_data_seg_ste *range_wqe_data;
+};
+
+struct mlx5hws_send_ring_dep_wqe *
+mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue);
+
+int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ u16 queue_size);
+
+void mlx5hws_send_queues_close(struct mlx5hws_context *ctx);
+
+int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
+ u16 queues,
+ u16 queue_size);
+
+int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 actions);
+
+int mlx5hws_send_test(struct mlx5hws_context *ctx,
+ u16 queues,
+ u16 queue_size);
+
+struct mlx5hws_send_engine_post_ctrl
+mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ char **buf, size_t *len);
+
+void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ struct mlx5hws_send_engine_post_attr *attr);
+
+void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr);
+
+void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr);
+
+void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue);
+
+static inline bool mlx5hws_send_engine_empty(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+ struct mlx5hws_send_ring_cq *send_cq = &queue->send_ring.send_cq;
+
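+ /* The engine is empty once the CQ has been polled up to the SQ
+ * producer index.
+ */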
+ return ((send_sq->cur_post & send_sq->buf_mask) == send_cq->poll_wqe);
+}
+
+static inline bool mlx5hws_send_engine_full(struct mlx5hws_send_engine *queue)
+{
+ return queue->used_entries >= queue->num_entries;
+}
+
+static inline void mlx5hws_send_engine_inc_rule(struct mlx5hws_send_engine *queue)
+{
+ queue->used_entries++;
+}
+
+static inline void mlx5hws_send_engine_dec_rule(struct mlx5hws_send_engine *queue)
+{
+ queue->used_entries--;
+}
+
+static inline void mlx5hws_send_engine_gen_comp(struct mlx5hws_send_engine *queue,
+ void *user_data,
+ int comp_status)
+{
+ struct mlx5hws_completed_poll *comp = &queue->completed;
+
+ comp->entries[comp->pi].status = comp_status;
+ comp->entries[comp->pi].user_data = user_data;
+
+ comp->pi = (comp->pi + 1) & comp->mask;
+}
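
[Editor's note] The completed-poll structure above is a power-of-two ring: with mask = entries - 1, "(i + 1) & mask" wraps the producer (pi) and consumer (ci) indices without a modulo. Because the indices are stored already masked, pi == ci cannot distinguish full from empty, which is why the engine tracks used_entries/num_entries separately. A standalone sketch of the index arithmetic, with illustrative names (not part of the patch):

/* Minimal sketch of the masked ring-index arithmetic used by
 * struct mlx5hws_completed_poll above; names are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_ring {
	uint16_t pi;	/* producer index (completion generation) */
	uint16_t ci;	/* consumer index (completion polling) */
	uint16_t mask;	/* entries - 1, entries being a power of two */
};

static inline bool demo_ring_has_work(const struct demo_ring *r)
{
	return r->pi != r->ci;		/* completions pending */
}

static inline void demo_ring_push(struct demo_ring *r)
{
	r->pi = (r->pi + 1) & r->mask;	/* same wrap as gen_comp above */
}

static inline void demo_ring_pop(struct demo_ring *r)
{
	r->ci = (r->ci + 1) & r->mask;
}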
+
+static inline bool mlx5hws_send_engine_err(struct mlx5hws_send_engine *queue)
+{
+ return queue->err;
+}
+
+#endif /* MLX5HWS_SEND_H_ */
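
[Editor's note] The header declares a three-phase posting API: reserve (post_start), fill the returned WQE buffer (post_req_wqe), then commit and optionally ring the doorbell (post_end). A hypothetical caller, inferred only from the declarations above; the attr fields set and the payload handling are assumptions, not taken from a real call site:

#include <linux/minmax.h>
#include <linux/string.h>

#include "mlx5hws_internal.h"	/* declarations shown above */

static void demo_post_one_wqe(struct mlx5hws_send_engine *queue,
			      const void *payload, size_t payload_len)
{
	struct mlx5hws_send_engine_post_attr attr = {0};
	struct mlx5hws_send_engine_post_ctrl ctrl;
	size_t wqe_len;
	char *wqe;

	/* Phase 1: reserve room on the send ring */
	ctrl = mlx5hws_send_engine_post_start(queue);

	/* Phase 2: obtain a WQE buffer and fill it */
	mlx5hws_send_engine_post_req_wqe(&ctrl, &wqe, &wqe_len);
	memcpy(wqe, payload, min(payload_len, wqe_len));

	/* Phase 3: commit; notify_hw asks post_end to ring the doorbell */
	attr.notify_hw = 1;
	attr.len = payload_len;
	mlx5hws_send_engine_post_end(&ctrl, &attr);
}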
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c
new file mode 100644
index 000000000000..8c063a8d87d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl)
+{
+ return tbl->ft_id;
+}
+
+static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr)
+{
+ ft_attr->type = tbl->fw_ft_type;
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ ft_attr->level = tbl->ctx->caps->fdb_ft.max_level - 1;
+ else
+ ft_attr->level = tbl->ctx->caps->nic_ft.max_level - 1;
+ ft_attr->rtc_valid = true;
+}
+
+static void hws_table_set_cap_attr(struct mlx5hws_table *tbl,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr)
+{
+ /* Enabling reformat_en or decap_en for the first flow table
+ * must be done when all VFs are down.
+ * However, HWS doesn't know when it is required to create the first FT.
+ * On the other hand, HWS doesn't use all these FT capabilities at all
+ * (the API doesn't even provide a way to specify these flags), so we'll
+ * just set these caps on all the flow tables.
+ * If HCA_CAP.fdb_dynamic_tunnel is set, this constraint is N/A.
+ */
+ if (!MLX5_CAP_ESW_FLOWTABLE(tbl->ctx->mdev, fdb_dynamic_tunnel)) {
+ ft_attr->reformat_en = true;
+ ft_attr->decap_en = true;
+ }
+}
+
+static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *default_miss;
+ struct mlx5hws_cmd_set_fte_dest dest = {0};
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u8 tbl_type = tbl->type;
+
+ if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
+ return 0;
+
+ if (ctx->common_res[tbl_type].default_miss) {
+ ctx->common_res[tbl_type].default_miss->refcount++;
+ return 0;
+ }
+
+ ft_attr.type = tbl->fw_ft_type;
+ ft_attr.level = tbl->ctx->caps->fdb_ft.max_level; /* The last level */
+ ft_attr.rtc_valid = false;
+
+ dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.destination_id = ctx->caps->eswitch_manager_vport_number;
+
+ fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.dests_num = 1;
+ fte_attr.dests = &dest;
+
+ default_miss = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+ if (!default_miss) {
+ mlx5hws_err(ctx, "Failed to default miss table type: 0x%x\n", tbl_type);
+ return -EINVAL;
+ }
+
+ /* ctx->ctrl_lock must be held here */
+ ctx->common_res[tbl_type].default_miss = default_miss;
+ ctx->common_res[tbl_type].default_miss->refcount++;
+
+ return 0;
+}
+
+/* Called under ctx->ctrl_lock */
+static void hws_table_down_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_cmd_forward_tbl *default_miss;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u8 tbl_type = tbl->type;
+
+ if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
+ return;
+
+ default_miss = ctx->common_res[tbl_type].default_miss;
+ if (--default_miss->refcount)
+ return;
+
+ mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, default_miss);
+ ctx->common_res[tbl_type].default_miss = NULL;
+}
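
[Editor's note] The up/down pair above is a lazily created, refcounted singleton; the bare int refcount is safe only because, per the comments in the code, every caller holds ctx->ctrl_lock. A generic kernel-context sketch of the same shape, with illustrative names:

#include <linux/slab.h>

struct demo_res {
	int refcount;
};

/* Protected by the same control lock as its users. */
static struct demo_res *demo_singleton;

static struct demo_res *demo_res_get(void)	/* lock held by caller */
{
	if (demo_singleton) {
		demo_singleton->refcount++;	/* reuse existing instance */
		return demo_singleton;
	}

	demo_singleton = kzalloc(sizeof(*demo_singleton), GFP_KERNEL);
	if (!demo_singleton)
		return NULL;

	demo_singleton->refcount = 1;		/* first reference */
	return demo_singleton;
}

static void demo_res_put(void)			/* lock held by caller */
{
	if (--demo_singleton->refcount)
		return;				/* other users remain */

	kfree(demo_singleton);
	demo_singleton = NULL;
}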
+
+static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32 ft_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+ int ret;
+
+ if (unlikely(tbl->type != MLX5HWS_TABLE_TYPE_FDB))
+ pr_warn("HWS: invalid table type %d\n", tbl->type);
+
+ mlx5hws_cmd_set_attr_connect_miss_tbl(tbl->ctx,
+ tbl->fw_ft_type,
+ tbl->type,
+ &ft_attr);
+
+ ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to connect FT to default FDB FT\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
+ struct mlx5hws_table *tbl,
+ u32 *ft_id)
+{
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ int ret;
+
+ hws_table_init_next_ft_attr(tbl, &ft_attr);
+ hws_table_set_cap_attr(tbl, &ft_attr);
+
+ ret = mlx5hws_cmd_flow_table_create(mdev, &ft_attr, ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed creating default ft\n");
+ return ret;
+ }
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ /* Take/create ref over the default miss */
+ ret = hws_table_up_default_fdb_miss_tbl(tbl);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to get default fdb miss\n");
+ goto free_ft_obj;
+ }
+ ret = hws_table_connect_to_default_miss_tbl(tbl, *ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed connecting to default miss tbl\n");
+ goto down_miss_tbl;
+ }
+ }
+
+ return 0;
+
+down_miss_tbl:
+ hws_table_down_default_fdb_miss_tbl(tbl);
+free_ft_obj:
+ mlx5hws_cmd_flow_table_destroy(mdev, ft_attr.type, *ft_id);
+ return ret;
+}
+
+void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
+ u32 ft_id)
+{
+ mlx5hws_cmd_flow_table_destroy(tbl->ctx->mdev, tbl->fw_ft_type, ft_id);
+ hws_table_down_default_fdb_miss_tbl(tbl);
+}
+
+static int hws_table_init_check_hws_support(struct mlx5hws_context *ctx,
+ struct mlx5hws_table *tbl)
+{
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) {
+ mlx5hws_err(ctx, "HWS not supported, cannot create mlx5hws_table\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int hws_table_init(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ ret = hws_table_init_check_hws_support(ctx, tbl);
+ if (ret)
+ return ret;
+
+ if (mlx5hws_table_get_fw_ft_type(tbl->type, (u8 *)&tbl->fw_ft_type)) {
+ pr_warn("HWS: invalid table type %d\n", tbl->type);
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&ctx->ctrl_lock);
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to create flow table object\n");
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+ }
+
+ ret = mlx5hws_action_get_default_stc(ctx, tbl->type);
+ if (ret)
+ goto tbl_destroy;
+
+ INIT_LIST_HEAD(&tbl->matchers_list);
+ INIT_LIST_HEAD(&tbl->default_miss.head);
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+tbl_destroy:
+ mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id);
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static void hws_table_uninit(struct mlx5hws_table *tbl)
+{
+ mutex_lock(&tbl->ctx->ctrl_lock);
+ mlx5hws_action_put_default_stc(tbl->ctx, tbl->type);
+ mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id);
+ mutex_unlock(&tbl->ctx->ctrl_lock);
+}
+
+struct mlx5hws_table *mlx5hws_table_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_table_attr *attr)
+{
+ struct mlx5hws_table *tbl;
+ int ret;
+
+ if (attr->type > MLX5HWS_TABLE_TYPE_FDB) {
+ mlx5hws_err(ctx, "Invalid table type %d\n", attr->type);
+ return NULL;
+ }
+
+ tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ return NULL;
+
+ tbl->ctx = ctx;
+ tbl->type = attr->type;
+ tbl->level = attr->level;
+
+ ret = hws_table_init(tbl);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to initialise table\n");
+ goto free_tbl;
+ }
+
+ mutex_lock(&ctx->ctrl_lock);
+ list_add(&tbl->tbl_list_node, &ctx->tbl_list);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return tbl;
+
+free_tbl:
+ kfree(tbl);
+ return NULL;
+}
+
+int mlx5hws_table_destroy(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+ if (!list_empty(&tbl->matchers_list)) {
+ mlx5hws_err(tbl->ctx, "Cannot destroy table containing matchers\n");
+ ret = -EBUSY;
+ goto unlock_err;
+ }
+
+ if (!list_empty(&tbl->default_miss.head)) {
+ mlx5hws_err(tbl->ctx, "Cannot destroy table pointed by default miss\n");
+ ret = -EBUSY;
+ goto unlock_err;
+ }
+
+ list_del_init(&tbl->tbl_list_node);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ hws_table_uninit(tbl);
+ kfree(tbl);
+
+ return 0;
+
+unlock_err:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static u32 hws_table_get_last_ft(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_matcher *matcher;
+
+ if (list_empty(&tbl->matchers_list))
+ return tbl->ft_id;
+
+ matcher = list_last_entry(&tbl->matchers_list, struct mlx5hws_matcher, list_node);
+ return matcher->end_ft_id;
+}
+
+int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+ int ret;
+
+ /* Due to FW limitation, resetting the flow table to default action will
+ * disconnect RTC when ignore_flow_level_rtc_valid is not supported.
+ */
+ if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid)
+ return 0;
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ return hws_table_connect_to_default_miss_tbl(tbl, ft_id);
+
+ ft_attr.type = tbl->fw_ft_type;
+ ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+ ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT;
+
+ ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to set FT default miss action\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 rtc_0_id,
+ u32 rtc_1_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+
+ ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID;
+ ft_attr.type = fw_ft_type;
+ ft_attr.rtc_id_0 = rtc_0_id;
+ ft_attr.rtc_id_1 = rtc_1_id;
+
+ return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
+}
+
+static int hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 next_ft_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+
+ ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+ ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
+ ft_attr.type = fw_ft_type;
+ ft_attr.table_miss_id = next_ft_id;
+
+ return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
+}
+
+int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl)
+{
+ struct mlx5hws_table *src_tbl;
+ int ret;
+
+ if (list_empty(&dst_tbl->default_miss.head))
+ return 0;
+
+ list_for_each_entry(src_tbl, &dst_tbl->default_miss.head, default_miss.next) {
+ ret = mlx5hws_table_connect_to_miss_table(src_tbl, dst_tbl);
+ if (ret) {
+ mlx5hws_err(dst_tbl->ctx,
+ "Failed to update source miss table, unexpected behavior\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
+ struct mlx5hws_table *dst_tbl)
+{
+ struct mlx5hws_matcher *matcher;
+ u32 last_ft_id;
+ int ret;
+
+ last_ft_id = hws_table_get_last_ft(src_tbl);
+
+ if (dst_tbl) {
+ if (list_empty(&dst_tbl->matchers_list)) {
+ /* Connect src_tbl last_ft to dst_tbl start anchor */
+ ret = hws_table_ft_set_next_ft(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ dst_tbl->ft_id);
+ if (ret)
+ return ret;
+
+ /* Reset last_ft RTC to default RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ 0, 0);
+ if (ret)
+ return ret;
+ } else {
+ /* Connect src_tbl last_ft to first matcher RTC */
+ matcher = list_first_entry(&dst_tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+ ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret)
+ return ret;
+
+ /* Reset next miss FT to default */
+ ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id);
+ if (ret)
+ return ret;
+ }
+ } else {
+ /* Reset next miss FT to default */
+ ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id);
+ if (ret)
+ return ret;
+
+ /* Reset last_ft RTC to default RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ 0, 0);
+ if (ret)
+ return ret;
+ }
+
+ src_tbl->default_miss.miss_tbl = dst_tbl;
+
+ return 0;
+}
+
+static int hws_table_set_default_miss_not_valid(struct mlx5hws_table *tbl,
+ struct mlx5hws_table *miss_tbl)
+{
+ if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid) {
+ mlx5hws_err(tbl->ctx, "Default miss table is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((miss_tbl && miss_tbl->type != tbl->type)) {
+ mlx5hws_err(tbl->ctx, "Invalid arguments\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
+ struct mlx5hws_table *miss_tbl)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_table *old_miss_tbl;
+ int ret;
+
+ ret = hws_table_set_default_miss_not_valid(tbl, miss_tbl);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ old_miss_tbl = tbl->default_miss.miss_tbl;
+ ret = mlx5hws_table_connect_to_miss_table(tbl, miss_tbl);
+ if (ret)
+ goto out;
+
+ if (old_miss_tbl)
+ list_del_init(&tbl->default_miss.next);
+
+ old_miss_tbl = tbl->default_miss.miss_tbl;
+ if (old_miss_tbl)
+ list_del_init(&old_miss_tbl->default_miss.head);
+
+ if (miss_tbl)
+ list_add(&tbl->default_miss.next, &miss_tbl->default_miss.head);
+
+ mutex_unlock(&ctx->ctrl_lock);
+ return 0;
+out:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h
new file mode 100644
index 000000000000..dd50420eec9e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_TABLE_H_
+#define MLX5HWS_TABLE_H_
+
+struct mlx5hws_default_miss {
+ /* My miss table */
+ struct mlx5hws_table *miss_tbl;
+ struct list_head next;
+ /* Tables missing to my table */
+ struct list_head head;
+};
+
+struct mlx5hws_table {
+ struct mlx5hws_context *ctx;
+ u32 ft_id;
+ enum mlx5hws_table_type type;
+ u32 fw_ft_type;
+ u32 level;
+ struct list_head matchers_list;
+ struct list_head tbl_list_node;
+ struct mlx5hws_default_miss default_miss;
+};
+
+static inline
+int mlx5hws_table_get_fw_ft_type(enum mlx5hws_table_type type,
+ u8 *ret_type)
+{
+ if (type != MLX5HWS_TABLE_TYPE_FDB)
+ return -EOPNOTSUPP;
+
+ *ret_type = FS_FT_FDB;
+
+ return 0;
+}
+
+static inline
+u32 mlx5hws_table_get_res_fw_ft_type(enum mlx5hws_table_type tbl_type,
+ bool is_mirror)
+{
+ if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+ return is_mirror ? FS_FT_FDB_TX : FS_FT_FDB_RX;
+
+ return 0;
+}
+
+int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
+ struct mlx5hws_table *tbl,
+ u32 *ft_id);
+
+void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
+ u32 ft_id);
+
+int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
+ struct mlx5hws_table *dst_tbl);
+
+int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl);
+
+int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id);
+
+int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 rtc_0_id,
+ u32 rtc_1_id);
+
+#endif /* MLX5HWS_TABLE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c
new file mode 100644
index 000000000000..faf42421c43f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx)
+{
+ int ret;
+
+ if (!ctx->caps->eswitch_manager)
+ return 0;
+
+ xa_init(&ctx->vports.vport_gvmi_xa);
+
+ /* Set gvmi for the eswitch manager and uplink vports only. The rest
+ * of the vports (vport 0 of the other function, VFs and SFs) are
+ * queried dynamically.
+ */
+
+ ret = mlx5hws_cmd_query_gvmi(ctx->mdev, false, 0, &ctx->vports.esw_manager_gvmi);
+ if (ret)
+ return ret;
+
+ ctx->vports.uplink_gvmi = 0;
+ return 0;
+}
+
+void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx)
+{
+ if (ctx->caps->eswitch_manager)
+ xa_destroy(&ctx->vports.vport_gvmi_xa);
+}
+
+static int hws_vport_add_gvmi(struct mlx5hws_context *ctx, u16 vport)
+{
+ u16 vport_gvmi;
+ int ret;
+
+ ret = mlx5hws_cmd_query_gvmi(ctx->mdev, true, vport, &vport_gvmi);
+ if (ret)
+ return -EINVAL;
+
+ ret = xa_insert(&ctx->vports.vport_gvmi_xa, vport,
+ xa_mk_value(vport_gvmi), GFP_KERNEL);
+ if (ret)
+ mlx5hws_dbg(ctx, "Couldn't insert new vport gvmi into xarray (%d)\n", ret);
+
+ return ret;
+}
+
+static bool hws_vport_is_esw_mgr_vport(struct mlx5hws_context *ctx, u16 vport)
+{
+ return ctx->caps->is_ecpf ? vport == MLX5_VPORT_ECPF :
+ vport == MLX5_VPORT_PF;
+}
+
+int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi)
+{
+ void *entry;
+ int ret;
+
+ if (!ctx->caps->eswitch_manager)
+ return -EINVAL;
+
+ if (hws_vport_is_esw_mgr_vport(ctx, vport)) {
+ *vport_gvmi = ctx->vports.esw_manager_gvmi;
+ return 0;
+ }
+
+ if (vport == MLX5_VPORT_UPLINK) {
+ *vport_gvmi = ctx->vports.uplink_gvmi;
+ return 0;
+ }
+
+load_entry:
+ entry = xa_load(&ctx->vports.vport_gvmi_xa, vport);
+
+ if (!xa_is_value(entry)) {
+ ret = hws_vport_add_gvmi(ctx, vport);
+ if (ret && ret != -EBUSY)
+ return ret;
+ goto load_entry;
+ }
+
+ *vport_gvmi = (u16)xa_to_value(entry);
+ return 0;
+}
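
[Editor's note] mlx5hws_vport_get_gvmi() caches firmware query results in an xarray: xa_insert() returning -EBUSY means another thread won the insertion race, in which case the code simply reloads the now-present entry. A generic kernel-context sketch of that lookup-or-insert pattern, with illustrative names:

#include <linux/xarray.h>

static int demo_cached_lookup(struct xarray *xa, unsigned long key, u16 *out,
			      int (*query)(unsigned long key, u16 *val))
{
	void *entry;
	u16 val;
	int ret;

retry:
	entry = xa_load(xa, key);
	if (xa_is_value(entry)) {		/* cache hit */
		*out = (u16)xa_to_value(entry);
		return 0;
	}

	ret = query(key, &val);			/* slow path, e.g. a FW query */
	if (ret)
		return ret;

	ret = xa_insert(xa, key, xa_mk_value(val), GFP_KERNEL);
	if (ret && ret != -EBUSY)		/* -EBUSY: lost the insert race */
		return ret;

	goto retry;				/* reload the cached entry */
}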
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h
new file mode 100644
index 000000000000..0912fc166b3a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_VPORT_H_
+#define MLX5HWS_VPORT_H_
+
+int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx);
+
+void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx);
+
+int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi);
+
+#endif /* MLX5HWS_VPORT_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
index bc94e75a7aeb..e7777700ee18 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
@@ -40,6 +40,7 @@
*/
#define MLXBF_GIGE_BCAST_MAC_FILTER_IDX 0
#define MLXBF_GIGE_LOCAL_MAC_FILTER_IDX 1
+#define MLXBF_GIGE_MAX_FILTER_IDX 3
/* Define for broadcast MAC literal */
#define BCAST_MAC_ADDR 0xFFFFFFFFFFFF
@@ -175,6 +176,13 @@ enum mlxbf_gige_res {
int mlxbf_gige_mdio_probe(struct platform_device *pdev,
struct mlxbf_gige *priv);
void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv);
+
+void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv);
+void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv);
+void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index);
+void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index);
void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index, u64 dmac);
void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index b157f0f1c5a8..385a56ac7348 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -168,6 +168,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
if (err)
goto napi_deinit;
+ mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX);
+ mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX);
+ mlxbf_gige_enable_multicast_rx(priv);
+
/* Set bits in INT_EN that we care about */
int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
@@ -379,6 +383,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
void __iomem *plu_base;
void __iomem *base;
int addr, phy_irq;
+ unsigned int i;
int err;
base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC);
@@ -423,6 +428,11 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
priv->rx_q_entries = MLXBF_GIGE_DEFAULT_RXQ_SZ;
priv->tx_q_entries = MLXBF_GIGE_DEFAULT_TXQ_SZ;
+ for (i = 0; i <= MLXBF_GIGE_MAX_FILTER_IDX; i++)
+ mlxbf_gige_disable_mac_rx_filter(priv, i);
+ mlxbf_gige_disable_multicast_rx(priv);
+ mlxbf_gige_disable_promisc(priv);
+
/* Write initial MAC address to hardware */
mlxbf_gige_initial_mac(priv);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
index 98a8681c21b9..4d14cb13fd64 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
@@ -62,6 +62,8 @@
#define MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL BIT(1)
#define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START 0x0520
#define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END 0x0528
+#define MLXBF_GIGE_RX_MAC_FILTER_GENERAL 0x0530
+#define MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST BIT(1)
#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC 0x0540
#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN BIT(0)
#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS 0x0548
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
index 699984358493..eb62620b63c7 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
@@ -11,15 +11,31 @@
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
-void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
- unsigned int index, u64 dmac)
+void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv)
{
void __iomem *base = priv->base;
- u64 control;
+ u64 data;
- /* Write destination MAC to specified MAC RX filter */
- writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
- (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
+ data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
+ data |= MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST;
+ writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
+}
+
+void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv)
+{
+ void __iomem *base = priv->base;
+ u64 data;
+
+ data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
+ data &= ~MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST;
+ writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
+}
+
+void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index)
+{
+ void __iomem *base = priv->base;
+ u64 control;
/* Enable MAC receive filter mask for specified index */
control = readq(base + MLXBF_GIGE_CONTROL);
@@ -27,6 +43,28 @@ void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
writeq(control, base + MLXBF_GIGE_CONTROL);
}
+void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index)
+{
+ void __iomem *base = priv->base;
+ u64 control;
+
+ /* Disable MAC receive filter mask for specified index */
+ control = readq(base + MLXBF_GIGE_CONTROL);
+ control &= ~(MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index);
+ writeq(control, base + MLXBF_GIGE_CONTROL);
+}
+
+void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index, u64 dmac)
+{
+ void __iomem *base = priv->base;
+
+ /* Write destination MAC to specified MAC RX filter */
+ writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
+ (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
+}
+
void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index, u64 *dmac)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index d61478c0c632..e746cd9c68ed 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -165,52 +165,22 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
return -ENODEV;
}
-static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev,
- struct thermal_cooling_device *cdev)
+static bool mlxsw_thermal_should_bind(struct thermal_zone_device *tzdev,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
{
struct mlxsw_thermal *thermal = thermal_zone_device_priv(tzdev);
- struct device *dev = thermal->bus_info->dev;
- int i, err;
+ const struct mlxsw_cooling_states *state = trip->priv;
/* If the cooling device is one of ours bind it */
if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
- return 0;
+ return false;
- for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- const struct mlxsw_cooling_states *state = &thermal->cooling_states[i];
+ c->upper = state->max_state;
+ c->lower = state->min_state;
- err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
- state->max_state,
- state->min_state,
- THERMAL_WEIGHT_DEFAULT);
- if (err < 0) {
- dev_err(dev, "Failed to bind cooling device to trip %d\n", i);
- return err;
- }
- }
- return 0;
-}
-
-static int mlxsw_thermal_unbind(struct thermal_zone_device *tzdev,
- struct thermal_cooling_device *cdev)
-{
- struct mlxsw_thermal *thermal = thermal_zone_device_priv(tzdev);
- struct device *dev = thermal->bus_info->dev;
- int i;
- int err;
-
- /* If the cooling device is our one unbind it */
- if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
- return 0;
-
- for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- err = thermal_zone_unbind_cooling_device(tzdev, i, cdev);
- if (err < 0) {
- dev_err(dev, "Failed to unbind cooling device\n");
- return err;
- }
- }
- return 0;
+ return true;
}
static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
@@ -240,57 +210,27 @@ static struct thermal_zone_params mlxsw_thermal_params = {
};
static struct thermal_zone_device_ops mlxsw_thermal_ops = {
- .bind = mlxsw_thermal_bind,
- .unbind = mlxsw_thermal_unbind,
+ .should_bind = mlxsw_thermal_should_bind,
.get_temp = mlxsw_thermal_get_temp,
};
-static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
- struct thermal_cooling_device *cdev)
+static bool mlxsw_thermal_module_should_bind(struct thermal_zone_device *tzdev,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
{
struct mlxsw_thermal_module *tz = thermal_zone_device_priv(tzdev);
+ const struct mlxsw_cooling_states *state = trip->priv;
struct mlxsw_thermal *thermal = tz->parent;
- int i, j, err;
/* If the cooling device is one of ours bind it */
if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
- return 0;
-
- for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- const struct mlxsw_cooling_states *state = &tz->cooling_states[i];
-
- err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
- state->max_state,
- state->min_state,
- THERMAL_WEIGHT_DEFAULT);
- if (err < 0)
- goto err_thermal_zone_bind_cooling_device;
- }
- return 0;
-
-err_thermal_zone_bind_cooling_device:
- for (j = i - 1; j >= 0; j--)
- thermal_zone_unbind_cooling_device(tzdev, j, cdev);
- return err;
-}
-
-static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev,
- struct thermal_cooling_device *cdev)
-{
- struct mlxsw_thermal_module *tz = thermal_zone_device_priv(tzdev);
- struct mlxsw_thermal *thermal = tz->parent;
- int i;
- int err;
+ return false;
- /* If the cooling device is one of ours unbind it */
- if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
- return 0;
+ c->upper = state->max_state;
+ c->lower = state->min_state;
- for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- err = thermal_zone_unbind_cooling_device(tzdev, i, cdev);
- WARN_ON(err);
- }
- return err;
+ return true;
}
static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
@@ -313,8 +253,7 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
}
static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
- .bind = mlxsw_thermal_module_bind,
- .unbind = mlxsw_thermal_module_unbind,
+ .should_bind = mlxsw_thermal_module_should_bind,
.get_temp = mlxsw_thermal_module_temp_get,
};
@@ -342,8 +281,7 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
}
static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
- .bind = mlxsw_thermal_module_bind,
- .unbind = mlxsw_thermal_module_unbind,
+ .should_bind = mlxsw_thermal_module_should_bind,
.get_temp = mlxsw_thermal_gearbox_temp_get,
};
@@ -411,7 +349,7 @@ static const struct thermal_cooling_device_ops mlxsw_cooling_ops = {
static int
mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
{
- char tz_name[THERMAL_NAME_LENGTH];
+ char tz_name[40];
int err;
if (module_tz->slot_index)
@@ -445,17 +383,14 @@ static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev)
thermal_zone_device_unregister(tzdev);
}
-static void
-mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal,
+static int
+mlxsw_thermal_module_init(struct mlxsw_thermal *thermal,
struct mlxsw_thermal_area *area, u8 module)
{
struct mlxsw_thermal_module *module_tz;
+ int i;
module_tz = &area->tz_module_arr[module];
- /* Skip if parent is already set (case of port split). */
- if (module_tz->parent)
- return;
module_tz->module = module;
module_tz->slot_index = area->slot_index;
module_tz->parent = thermal;
@@ -465,15 +400,15 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
sizeof(thermal->trips));
memcpy(module_tz->cooling_states, default_cooling_states,
sizeof(thermal->cooling_states));
+ for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++)
+ module_tz->trips[i].priv = &module_tz->cooling_states[i];
+
+ return mlxsw_thermal_module_tz_init(module_tz);
}
static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
{
- if (module_tz && module_tz->tzdev) {
- mlxsw_thermal_module_tz_fini(module_tz->tzdev);
- module_tz->tzdev = NULL;
- module_tz->parent = NULL;
- }
+ mlxsw_thermal_module_tz_fini(module_tz->tzdev);
}
static int
@@ -481,7 +416,6 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
struct mlxsw_thermal *thermal,
struct mlxsw_thermal_area *area)
{
- struct mlxsw_thermal_module *module_tz;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
int i, err;
@@ -503,22 +437,16 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
if (!area->tz_module_arr)
return -ENOMEM;
- for (i = 0; i < area->tz_module_num; i++)
- mlxsw_thermal_module_init(dev, core, thermal, area, i);
-
for (i = 0; i < area->tz_module_num; i++) {
- module_tz = &area->tz_module_arr[i];
- if (!module_tz->parent)
- continue;
- err = mlxsw_thermal_module_tz_init(module_tz);
+ err = mlxsw_thermal_module_init(thermal, area, i);
if (err)
- goto err_thermal_module_tz_init;
+ goto err_thermal_module_init;
}
return 0;
-err_thermal_module_tz_init:
- for (i = area->tz_module_num - 1; i >= 0; i--)
+err_thermal_module_init:
+ for (i--; i >= 0; i--)
mlxsw_thermal_module_fini(&area->tz_module_arr[i]);
kfree(area->tz_module_arr);
return err;
@@ -579,7 +507,7 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
struct mlxsw_thermal_module *gearbox_tz;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
u8 gbox_num;
- int i;
+ int i, j;
int err;
mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index);
@@ -606,6 +534,9 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
sizeof(thermal->trips));
memcpy(gearbox_tz->cooling_states, default_cooling_states,
sizeof(thermal->cooling_states));
+ for (j = 0; j < MLXSW_THERMAL_NUM_TRIPS; j++)
+ gearbox_tz->trips[j].priv = &gearbox_tz->cooling_states[j];
+
gearbox_tz->module = i;
gearbox_tz->parent = thermal;
gearbox_tz->slot_index = area->slot_index;
@@ -722,6 +653,9 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
thermal->bus_info = bus_info;
memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips));
memcpy(thermal->cooling_states, default_cooling_states, sizeof(thermal->cooling_states));
+ for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++)
+ thermal->trips[i].priv = &thermal->cooling_states[i];
+
thermal->line_cards[0].slot_index = 0;
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl);
@@ -821,10 +755,7 @@ err_linecards_event_ops_register:
err_thermal_gearboxes_init:
mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
err_thermal_modules_init:
- if (thermal->tzdev) {
- thermal_zone_device_unregister(thermal->tzdev);
- thermal->tzdev = NULL;
- }
+ thermal_zone_device_unregister(thermal->tzdev);
err_thermal_zone_device_register:
err_thermal_cooling_device_register:
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
@@ -845,10 +776,7 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
thermal);
mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]);
mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
- if (thermal->tzdev) {
- thermal_zone_device_unregister(thermal->tzdev);
- thermal->tzdev = NULL;
- }
+ thermal_zone_device_unregister(thermal->tzdev);
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
thermal_cooling_device_unregister(thermal->cdevs[i].cdev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index f064789f3240..3f5e5d99251b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1676,9 +1676,11 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
netif_carrier_off(dev);
- dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
- NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_TC;
dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
+ dev->lltx = true;
+ dev->netns_local = true;
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR;
@@ -2784,7 +2786,9 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
.hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get,
.hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
.shaper_work = mlxsw_sp1_ptp_shaper_work,
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
.get_ts_info = mlxsw_sp1_ptp_get_ts_info,
+#endif
.get_stats_count = mlxsw_sp1_get_stats_count,
.get_stats_strings = mlxsw_sp1_get_stats_strings,
.get_stats = mlxsw_sp1_get_stats,
@@ -2801,7 +2805,9 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
.shaper_work = mlxsw_sp2_ptp_shaper_work,
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
+#endif
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
@@ -2818,7 +2824,9 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
.hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
.shaper_work = mlxsw_sp2_ptp_shaper_work,
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
+#endif
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index 769095d4932d..c8aa1452fbb9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -11,14 +11,6 @@ struct mlxsw_sp;
struct mlxsw_sp_port;
struct mlxsw_sp_ptp_clock;
-static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct kernel_ethtool_ts_info *info)
-{
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
- return 0;
-}
-
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
struct mlxsw_sp_ptp_clock *
@@ -151,12 +143,6 @@ static inline void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
{
}
-static inline int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct kernel_ethtool_ts_info *info)
-{
- return mlxsw_sp_ptp_get_ts_info_noptp(info);
-}
-
static inline int mlxsw_sp1_get_stats_count(void)
{
return 0;
@@ -226,12 +212,6 @@ mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
-static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct kernel_ethtool_ts_info *info)
-{
- return mlxsw_sp_ptp_get_ts_info_noptp(info);
-}
-
static inline int
mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp_port *mlxsw_sp_port,
diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig
index c002ede36402..85519690b837 100644
--- a/drivers/net/ethernet/meta/Kconfig
+++ b/drivers/net/ethernet/meta/Kconfig
@@ -23,6 +23,8 @@ config FBNIC
depends on !S390
depends on MAX_SKB_FRAGS < 22
depends on PCI_MSI
+ select NET_DEVLINK
+ select PAGE_POOL
select PHYLINK
help
This driver supports Meta Platforms Host Network Interface.
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
index 9373b558fdc9..ed4533a73c57 100644
--- a/drivers/net/ethernet/meta/fbnic/Makefile
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -8,7 +8,9 @@
obj-$(CONFIG_FBNIC) += fbnic.o
fbnic-y := fbnic_devlink.o \
+ fbnic_ethtool.o \
fbnic_fw.o \
+ fbnic_hw_stats.o \
fbnic_irq.o \
fbnic_mac.o \
fbnic_netdev.o \
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index ad2689bfd6cb..0f9e8d79461c 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -11,6 +11,7 @@
#include "fbnic_csr.h"
#include "fbnic_fw.h"
+#include "fbnic_hw_stats.h"
#include "fbnic_mac.h"
#include "fbnic_rpc.h"
@@ -47,6 +48,9 @@ struct fbnic_dev {
/* Number of TCQs/RCQs available on hardware */
u16 max_num_queues;
+
+ /* Local copy of hardware statistics */
+ struct fbnic_hw_stats hw_stats;
};
/* Reserve entry 0 in the MSI-X "others" array until we have filled all
@@ -132,6 +136,9 @@ void fbnic_free_irq(struct fbnic_dev *dev, int nr, void *data);
void fbnic_free_irqs(struct fbnic_dev *fbd);
int fbnic_alloc_irqs(struct fbnic_dev *fbd);
+void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
+ const size_t str_sz);
+
enum fbnic_boards {
fbnic_board_asic
};
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index a64360de0552..21db509acbc1 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -660,6 +660,43 @@ enum {
#define FBNIC_SIG_PCS_INTR_MASK 0x11816 /* 0x46058 */
#define FBNIC_CSR_END_SIG 0x1184e /* CSR section delimiter */
+#define FBNIC_CSR_START_MAC_STAT 0x11a00
+#define FBNIC_MAC_STAT_RX_BYTE_COUNT_L 0x11a08 /* 0x46820 */
+#define FBNIC_MAC_STAT_RX_BYTE_COUNT_H 0x11a09 /* 0x46824 */
+#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_L \
+ 0x11a0a /* 0x46828 */
+#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_H \
+ 0x11a0b /* 0x4682c */
+#define FBNIC_MAC_STAT_RX_TOOLONG_L 0x11a0e /* 0x46838 */
+#define FBNIC_MAC_STAT_RX_TOOLONG_H 0x11a0f /* 0x4683c */
+#define FBNIC_MAC_STAT_RX_RECEIVED_OK_L \
+ 0x11a12 /* 0x46848 */
+#define FBNIC_MAC_STAT_RX_RECEIVED_OK_H \
+ 0x11a13 /* 0x4684c */
+#define FBNIC_MAC_STAT_RX_PACKET_BAD_FCS_L \
+ 0x11a14 /* 0x46850 */
+#define FBNIC_MAC_STAT_RX_PACKET_BAD_FCS_H \
+ 0x11a15 /* 0x46854 */
+#define FBNIC_MAC_STAT_RX_IFINERRORS_L 0x11a18 /* 0x46860 */
+#define FBNIC_MAC_STAT_RX_IFINERRORS_H 0x11a19 /* 0x46864 */
+#define FBNIC_MAC_STAT_RX_MULTICAST_L 0x11a1c /* 0x46870 */
+#define FBNIC_MAC_STAT_RX_MULTICAST_H 0x11a1d /* 0x46874 */
+#define FBNIC_MAC_STAT_RX_BROADCAST_L 0x11a1e /* 0x46878 */
+#define FBNIC_MAC_STAT_RX_BROADCAST_H 0x11a1f /* 0x4687c */
+#define FBNIC_MAC_STAT_TX_BYTE_COUNT_L 0x11a3e /* 0x468f8 */
+#define FBNIC_MAC_STAT_TX_BYTE_COUNT_H 0x11a3f /* 0x468fc */
+#define FBNIC_MAC_STAT_TX_TRANSMITTED_OK_L \
+ 0x11a42 /* 0x46908 */
+#define FBNIC_MAC_STAT_TX_TRANSMITTED_OK_H \
+ 0x11a43 /* 0x4690c */
+#define FBNIC_MAC_STAT_TX_IFOUTERRORS_L \
+ 0x11a46 /* 0x46918 */
+#define FBNIC_MAC_STAT_TX_IFOUTERRORS_H \
+ 0x11a47 /* 0x4691c */
+#define FBNIC_MAC_STAT_TX_MULTICAST_L 0x11a4a /* 0x46928 */
+#define FBNIC_MAC_STAT_TX_MULTICAST_H 0x11a4b /* 0x4692c */
+#define FBNIC_MAC_STAT_TX_BROADCAST_L 0x11a4c /* 0x46930 */
+#define FBNIC_MAC_STAT_TX_BROADCAST_H 0x11a4d /* 0x46934 */
/* PUL User Registers */
#define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */
#define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
index e87049dfd223..0072d612215e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <net/devlink.h>
@@ -10,6 +10,56 @@
#define FBNIC_SN_STR_LEN 24
+static int fbnic_version_running_put(struct devlink_info_req *req,
+ struct fbnic_fw_ver *fw_ver,
+ char *ver_name)
+{
+ char running_ver[FBNIC_FW_VER_MAX_SIZE];
+ int err;
+
+ fbnic_mk_fw_ver_str(fw_ver->version, running_ver);
+ err = devlink_info_version_running_put(req, ver_name, running_ver);
+ if (err)
+ return err;
+
+ if (strlen(fw_ver->commit) > 0) {
+ char commit_name[FBNIC_SN_STR_LEN];
+
+ snprintf(commit_name, FBNIC_SN_STR_LEN, "%s.commit", ver_name);
+ err = devlink_info_version_running_put(req, commit_name,
+ fw_ver->commit);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int fbnic_version_stored_put(struct devlink_info_req *req,
+ struct fbnic_fw_ver *fw_ver,
+ char *ver_name)
+{
+ char stored_ver[FBNIC_FW_VER_MAX_SIZE];
+ int err;
+
+ fbnic_mk_fw_ver_str(fw_ver->version, stored_ver);
+ err = devlink_info_version_stored_put(req, ver_name, stored_ver);
+ if (err)
+ return err;
+
+ if (strlen(fw_ver->commit) > 0) {
+ char commit_name[FBNIC_SN_STR_LEN];
+
+ snprintf(commit_name, FBNIC_SN_STR_LEN, "%s.commit", ver_name);
+ err = devlink_info_version_stored_put(req, commit_name,
+ fw_ver->commit);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int fbnic_devlink_info_get(struct devlink *devlink,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
@@ -17,6 +67,31 @@ static int fbnic_devlink_info_get(struct devlink *devlink,
struct fbnic_dev *fbd = devlink_priv(devlink);
int err;
+ err = fbnic_version_running_put(req, &fbd->fw_cap.running.mgmt,
+ DEVLINK_INFO_VERSION_GENERIC_FW);
+ if (err)
+ return err;
+
+ err = fbnic_version_running_put(req, &fbd->fw_cap.running.bootloader,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER);
+ if (err)
+ return err;
+
+ err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.mgmt,
+ DEVLINK_INFO_VERSION_GENERIC_FW);
+ if (err)
+ return err;
+
+ err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.bootloader,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER);
+ if (err)
+ return err;
+
+ err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.undi,
+ DEVLINK_INFO_VERSION_GENERIC_FW_UNDI);
+ if (err)
+ return err;
+
if (fbd->dsn) {
unsigned char serial[FBNIC_SN_STR_LEN];
u8 dsn[8];
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
new file mode 100644
index 000000000000..5d980e178941
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+#include "fbnic_tlv.h"
+
+static void
+fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
+ sizeof(drvinfo->fw_version));
+}
+
+static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
+{
+ if (counter->reported)
+ *stat = counter->value;
+}
+
+static void
+fbnic_get_eth_mac_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *eth_mac_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_stats *mac_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+ const struct fbnic_mac *mac;
+
+ mac_stats = &fbd->hw_stats.mac;
+ mac = fbd->mac;
+
+ mac->get_eth_mac_stats(fbd, false, &mac_stats->eth_mac);
+
+ fbnic_set_counter(&eth_mac_stats->FramesTransmittedOK,
+ &mac_stats->eth_mac.FramesTransmittedOK);
+ fbnic_set_counter(&eth_mac_stats->FramesReceivedOK,
+ &mac_stats->eth_mac.FramesReceivedOK);
+ fbnic_set_counter(&eth_mac_stats->FrameCheckSequenceErrors,
+ &mac_stats->eth_mac.FrameCheckSequenceErrors);
+ fbnic_set_counter(&eth_mac_stats->AlignmentErrors,
+ &mac_stats->eth_mac.AlignmentErrors);
+ fbnic_set_counter(&eth_mac_stats->OctetsTransmittedOK,
+ &mac_stats->eth_mac.OctetsTransmittedOK);
+ fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACXmitError,
+ &mac_stats->eth_mac.FramesLostDueToIntMACXmitError);
+ fbnic_set_counter(&eth_mac_stats->OctetsReceivedOK,
+ &mac_stats->eth_mac.OctetsReceivedOK);
+ fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACRcvError,
+ &mac_stats->eth_mac.FramesLostDueToIntMACRcvError);
+ fbnic_set_counter(&eth_mac_stats->MulticastFramesXmittedOK,
+ &mac_stats->eth_mac.MulticastFramesXmittedOK);
+ fbnic_set_counter(&eth_mac_stats->BroadcastFramesXmittedOK,
+ &mac_stats->eth_mac.BroadcastFramesXmittedOK);
+ fbnic_set_counter(&eth_mac_stats->MulticastFramesReceivedOK,
+ &mac_stats->eth_mac.MulticastFramesReceivedOK);
+ fbnic_set_counter(&eth_mac_stats->BroadcastFramesReceivedOK,
+ &mac_stats->eth_mac.BroadcastFramesReceivedOK);
+ fbnic_set_counter(&eth_mac_stats->FrameTooLongErrors,
+ &mac_stats->eth_mac.FrameTooLongErrors);
+}
+
+static const struct ethtool_ops fbnic_ethtool_ops = {
+ .get_drvinfo = fbnic_get_drvinfo,
+ .get_eth_mac_stats = fbnic_get_eth_mac_stats,
+};
+
+void fbnic_set_ethtool_ops(struct net_device *dev)
+{
+ dev->ethtool_ops = &fbnic_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index 0c6e1b4c119b..8f7a2a19ddf8 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -789,3 +789,16 @@ void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN;
} while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts);
}
+
+void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
+ const size_t str_sz)
+{
+ struct fbnic_fw_ver *mgmt = &fbd->fw_cap.running.mgmt;
+ const char *delim = "";
+
+ if (mgmt->commit[0])
+ delim = "_";
+
+ fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit,
+ fw_version, str_sz);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index c65bca613665..221faf8c6756 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -53,10 +53,10 @@ int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership);
int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll);
void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd);
-#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str) \
+#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \
do { \
const u32 __rev_id = _rev_id; \
- snprintf(_str, sizeof(_str), "%02lu.%02lu.%02lu-%03lu%s%s", \
+ snprintf(_str, _str_sz, "%02lu.%02lu.%02lu-%03lu%s%s", \
FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MAJOR, __rev_id), \
FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MINOR, __rev_id), \
FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_PATCH, __rev_id), \
@@ -65,7 +65,7 @@ do { \
} while (0)
#define fbnic_mk_fw_ver_str(_rev_id, _str) \
- fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str)
+ fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str, sizeof(_str))
#define FW_HEARTBEAT_PERIOD (10 * HZ)
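
[Editor's note] The extra _str_sz macro parameter exists because sizeof() only reports the array size where the array itself is in scope; through a char * function parameter (as in fbnic_get_fw_ver_commit_str()) it degrades to the pointer size. A minimal standalone illustration, not taken from the patch:

#include <stdio.h>

static void format_ver(char *buf, size_t buf_sz)
{
	/* sizeof(buf) here is sizeof(char *) -- typically 8 -- not the
	 * caller's array size, hence the explicit buf_sz argument.
	 */
	snprintf(buf, buf_sz, "%02u.%02u.%02u", 1u, 2u, 3u);
}

int main(void)
{
	char ver[16];

	format_ver(ver, sizeof(ver));	/* sizeof works here: array in scope */
	puts(ver);
	return 0;
}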
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
new file mode 100644
index 000000000000..a0acc7606aa1
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include "fbnic.h"
+
+u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset)
+{
+ u32 prev_upper, upper, lower, diff;
+
+ prev_upper = rd32(fbd, reg + offset);
+ lower = rd32(fbd, reg);
+ upper = rd32(fbd, reg + offset);
+
+ diff = upper - prev_upper;
+ if (!diff)
+ return ((u64)upper << 32) | lower;
+
+ if (diff > 1)
+ dev_warn_once(fbd->dev,
+ "Stats inconsistent, upper 32b of %#010x updating too quickly\n",
+ reg * 4);
+
+ /* Return only the upper bits as we cannot guarantee
+ * the accuracy of the lower bits. We will add them in
+ * when the counter slows down enough that we can get
+ * a snapshot with both upper values being the same
+ * between reads.
+ */
+ return ((u64)upper << 32);
+}
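
[Editor's note] fbnic_stat_rd64() guards against torn reads of a 64-bit counter split across two 32-bit registers by reading the upper half before and after the lower half; if the two upper reads disagree, the stale lower half is discarded. A userspace-style sketch of the same technique, assuming a hypothetical reg_read() accessor:

#include <stdint.h>

static uint64_t demo_rd64(uint32_t (*reg_read)(int idx), int lo, int hi)
{
	uint32_t prev_upper = reg_read(hi);	/* upper half, first pass */
	uint32_t lower = reg_read(lo);		/* lower half */
	uint32_t upper = reg_read(hi);		/* upper half, second pass */

	if (upper == prev_upper)		/* no rollover in between */
		return ((uint64_t)upper << 32) | lower;

	/* The lower half wrapped mid-sequence; only the upper half is
	 * trustworthy, so return it alone, as fbnic_stat_rd64() does.
	 */
	return (uint64_t)upper << 32;
}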
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
new file mode 100644
index 000000000000..30348904b510
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_HW_STATS_H_
+#define _FBNIC_HW_STATS_H_
+
+#include <linux/ethtool.h>
+
+#include "fbnic_csr.h"
+
+struct fbnic_stat_counter {
+ u64 value;
+ union {
+ u32 old_reg_value_32;
+ u64 old_reg_value_64;
+ } u;
+ bool reported;
+};
+
+struct fbnic_eth_mac_stats {
+ struct fbnic_stat_counter FramesTransmittedOK;
+ struct fbnic_stat_counter FramesReceivedOK;
+ struct fbnic_stat_counter FrameCheckSequenceErrors;
+ struct fbnic_stat_counter AlignmentErrors;
+ struct fbnic_stat_counter OctetsTransmittedOK;
+ struct fbnic_stat_counter FramesLostDueToIntMACXmitError;
+ struct fbnic_stat_counter OctetsReceivedOK;
+ struct fbnic_stat_counter FramesLostDueToIntMACRcvError;
+ struct fbnic_stat_counter MulticastFramesXmittedOK;
+ struct fbnic_stat_counter BroadcastFramesXmittedOK;
+ struct fbnic_stat_counter MulticastFramesReceivedOK;
+ struct fbnic_stat_counter BroadcastFramesReceivedOK;
+ struct fbnic_stat_counter FrameTooLongErrors;
+};
+
+struct fbnic_mac_stats {
+ struct fbnic_eth_mac_stats eth_mac;
+};
+
+struct fbnic_hw_stats {
+ struct fbnic_mac_stats mac;
+};
+
+u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset);
+
+void fbnic_get_hw_stats(struct fbnic_dev *fbd);
+
+#endif /* _FBNIC_HW_STATS_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index 7920e7af82d9..7b654d0a6dac 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -403,6 +403,21 @@ static void fbnic_mac_init_regs(struct fbnic_dev *fbd)
fbnic_mac_init_txb(fbd);
}
+static void __fbnic_mac_stat_rd64(struct fbnic_dev *fbd, bool reset, u32 reg,
+ struct fbnic_stat_counter *stat)
+{
+ u64 new_reg_value;
+
+ new_reg_value = fbnic_stat_rd64(fbd, reg, 1);
+ if (!reset)
+ stat->value += new_reg_value - stat->u.old_reg_value_64;
+ stat->u.old_reg_value_64 = new_reg_value;
+ stat->reported = true;
+}
+
+#define fbnic_mac_stat_rd64(fbd, reset, __stat, __CSR) \
+ __fbnic_mac_stat_rd64(fbd, reset, FBNIC_##__CSR##_L, &(__stat))
+
static void fbnic_mac_tx_pause_config(struct fbnic_dev *fbd, bool tx_pause)
{
u32 rxb_pause_ctrl;
@@ -637,12 +652,47 @@ static void fbnic_mac_link_up_asic(struct fbnic_dev *fbd,
wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg);
}
+static void
+fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_eth_mac_stats *mac_stats)
+{
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->OctetsReceivedOK,
+ MAC_STAT_RX_BYTE_COUNT);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->AlignmentErrors,
+ MAC_STAT_RX_ALIGN_ERROR);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->FrameTooLongErrors,
+ MAC_STAT_RX_TOOLONG);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->FramesReceivedOK,
+ MAC_STAT_RX_RECEIVED_OK);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->FrameCheckSequenceErrors,
+ MAC_STAT_RX_PACKET_BAD_FCS);
+ fbnic_mac_stat_rd64(fbd, reset,
+ mac_stats->FramesLostDueToIntMACRcvError,
+ MAC_STAT_RX_IFINERRORS);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->MulticastFramesReceivedOK,
+ MAC_STAT_RX_MULTICAST);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->BroadcastFramesReceivedOK,
+ MAC_STAT_RX_BROADCAST);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->OctetsTransmittedOK,
+ MAC_STAT_TX_BYTE_COUNT);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->FramesTransmittedOK,
+ MAC_STAT_TX_TRANSMITTED_OK);
+ fbnic_mac_stat_rd64(fbd, reset,
+ mac_stats->FramesLostDueToIntMACXmitError,
+ MAC_STAT_TX_IFOUTERRORS);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->MulticastFramesXmittedOK,
+ MAC_STAT_TX_MULTICAST);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->BroadcastFramesXmittedOK,
+ MAC_STAT_TX_BROADCAST);
+}
+
static const struct fbnic_mac fbnic_mac_asic = {
.init_regs = fbnic_mac_init_regs,
.pcs_enable = fbnic_pcs_enable_asic,
.pcs_disable = fbnic_pcs_disable_asic,
.pcs_get_link = fbnic_pcs_get_link_asic,
.pcs_get_link_event = fbnic_pcs_get_link_event_asic,
+ .get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
.link_down = fbnic_mac_link_down_asic,
.link_up = fbnic_mac_link_up_asic,
};
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
index f53be6e6aef9..476239a9d381 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -78,6 +78,9 @@ struct fbnic_mac {
bool (*pcs_get_link)(struct fbnic_dev *fbd);
int (*pcs_get_link_event)(struct fbnic_dev *fbd);
+ void (*get_eth_mac_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_eth_mac_stats *mac_stats);
+
void (*link_down)(struct fbnic_dev *fbd);
void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
};
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index b7ce6da68543..a400616a24d4 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -4,6 +4,7 @@
#include <linux/etherdevice.h>
#include <linux/ipv6.h>
#include <linux/types.h>
+#include <net/netdev_queues.h>
#include "fbnic.h"
#include "fbnic_netdev.h"
@@ -316,6 +317,74 @@ void fbnic_clear_rx_mode(struct net_device *netdev)
__dev_mc_unsync(netdev, NULL);
}
+static void fbnic_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats64)
+{
+ u64 tx_bytes, tx_packets, tx_dropped = 0;
+ u64 rx_bytes, rx_packets, rx_dropped = 0;
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_queue_stats *stats;
+ unsigned int start, i;
+
+ stats = &fbn->tx_stats;
+
+ tx_bytes = stats->bytes;
+ tx_packets = stats->packets;
+ tx_dropped = stats->dropped;
+
+ stats64->tx_bytes = tx_bytes;
+ stats64->tx_packets = tx_packets;
+ stats64->tx_dropped = tx_dropped;
+
+ for (i = 0; i < fbn->num_tx_queues; i++) {
+ struct fbnic_ring *txr = fbn->tx[i];
+
+ if (!txr)
+ continue;
+
+ stats = &txr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ tx_bytes = stats->bytes;
+ tx_packets = stats->packets;
+ tx_dropped = stats->dropped;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ stats64->tx_bytes += tx_bytes;
+ stats64->tx_packets += tx_packets;
+ stats64->tx_dropped += tx_dropped;
+ }
+
+ stats = &fbn->rx_stats;
+
+ rx_bytes = stats->bytes;
+ rx_packets = stats->packets;
+ rx_dropped = stats->dropped;
+
+ stats64->rx_bytes = rx_bytes;
+ stats64->rx_packets = rx_packets;
+ stats64->rx_dropped = rx_dropped;
+
+ for (i = 0; i < fbn->num_rx_queues; i++) {
+ struct fbnic_ring *rxr = fbn->rx[i];
+
+ if (!rxr)
+ continue;
+
+ stats = &rxr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ rx_bytes = stats->bytes;
+ rx_packets = stats->packets;
+ rx_dropped = stats->dropped;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ stats64->rx_bytes += rx_bytes;
+ stats64->rx_packets += rx_packets;
+ stats64->rx_dropped += rx_dropped;
+ }
+}
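
[Editor's note] The u64_stats_fetch_begin()/retry() loops above pair with u64_stats_update_begin()/end() on the writer side; the Tx/Rx completion paths later in this patch (e.g. fbnic_clean_twq0() in fbnic_txrx.c) follow exactly this shape. A minimal writer sketch, assuming the bytes/packets/syncp fields that the readers imply:

#include <linux/u64_stats_sync.h>

/* Sketch only: mirrors the fbnic_queue_stats fields used above. */
static void demo_stats_add(struct fbnic_queue_stats *stats,
			   u64 bytes, u64 packets)
{
	u64_stats_update_begin(&stats->syncp);	/* readers retry meanwhile */
	stats->bytes += bytes;
	stats->packets += packets;
	u64_stats_update_end(&stats->syncp);
}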
+
static const struct net_device_ops fbnic_netdev_ops = {
.ndo_open = fbnic_open,
.ndo_stop = fbnic_stop,
@@ -324,6 +393,72 @@ static const struct net_device_ops fbnic_netdev_ops = {
.ndo_features_check = fbnic_features_check,
.ndo_set_mac_address = fbnic_set_mac,
.ndo_set_rx_mode = fbnic_set_rx_mode,
+ .ndo_get_stats64 = fbnic_get_stats64,
+};
+
+static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_rx *rx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_ring *rxr = fbn->rx[idx];
+ struct fbnic_queue_stats *stats;
+ unsigned int start;
+ u64 bytes, packets;
+
+ if (!rxr)
+ return;
+
+ stats = &rxr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ bytes = stats->bytes;
+ packets = stats->packets;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ rx->bytes = bytes;
+ rx->packets = packets;
+}
+
+static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_ring *txr = fbn->tx[idx];
+ struct fbnic_queue_stats *stats;
+ unsigned int start;
+ u64 bytes, packets;
+
+ if (!txr)
+ return;
+
+ stats = &txr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ bytes = stats->bytes;
+ packets = stats->packets;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ tx->bytes = bytes;
+ tx->packets = packets;
+}
+
+static void fbnic_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+
+ tx->bytes = fbn->tx_stats.bytes;
+ tx->packets = fbn->tx_stats.packets;
+
+ rx->bytes = fbn->rx_stats.bytes;
+ rx->packets = fbn->rx_stats.packets;
+}
+
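+/* These callbacks back the per-queue stats interface declared in
+ * <net/netdev_queues.h> (included above). get_base_stats supplies what
+ * the per-queue callbacks cannot see: totals carried over from rings
+ * that have since been destroyed.
+ */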
+static const struct netdev_stat_ops fbnic_stat_ops = {
+ .get_queue_stats_rx = fbnic_get_queue_stats_rx,
+ .get_queue_stats_tx = fbnic_get_queue_stats_tx,
+ .get_base_stats = fbnic_get_base_stats,
};
void fbnic_reset_queues(struct fbnic_net *fbn,
@@ -384,6 +519,9 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbd->netdev = netdev;
netdev->netdev_ops = &fbnic_netdev_ops;
+ netdev->stat_ops = &fbnic_stat_ops;
+
+ fbnic_set_ethtool_ops(netdev);
fbn = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index 6bc0ebeb8182..6c27da09a612 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -40,6 +40,9 @@ struct fbnic_net {
u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN];
u32 rss_flow_hash[FBNIC_NUM_HASH_OPT];
+ /* Storage for stats after ring destruction */
+ struct fbnic_queue_stats tx_stats;
+ struct fbnic_queue_stats rx_stats;
u64 link_down_events;
struct list_head napis;
@@ -55,6 +58,7 @@ int fbnic_netdev_register(struct net_device *netdev);
void fbnic_netdev_unregister(struct net_device *netdev);
void fbnic_reset_queues(struct fbnic_net *fbn,
unsigned int tx, unsigned int rx);
+void fbnic_set_ethtool_ops(struct net_device *dev);
void __fbnic_set_rx_mode(struct net_device *netdev);
void fbnic_clear_rx_mode(struct net_device *netdev);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index 0ed4c9fff5d8..6a6d7e22f1a7 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -273,6 +273,9 @@ fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring)
err_free:
dev_kfree_skb_any(skb);
err_count:
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped++;
+ u64_stats_update_end(&ring->stats.syncp);
return NETDEV_TX_OK;
}
@@ -363,10 +366,19 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
txq = txring_txq(nv->napi.dev, ring);
if (unlikely(discard)) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped += total_packets;
+ u64_stats_update_end(&ring->stats.syncp);
+
netdev_tx_completed_queue(txq, total_packets, total_bytes);
return;
}
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.bytes += total_bytes;
+ ring->stats.packets += total_packets;
+ u64_stats_update_end(&ring->stats.syncp);
+
netif_txq_completed_wake(txq, total_packets, total_bytes,
fbnic_desc_unused(ring),
FBNIC_TX_DESC_WAKEUP);
@@ -730,12 +742,12 @@ static bool fbnic_rcd_metadata_err(u64 rcd)
static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
struct fbnic_q_triad *qt, int budget)
{
+ unsigned int packets = 0, bytes = 0, dropped = 0;
struct fbnic_ring *rcq = &qt->cmpl;
struct fbnic_pkt_buff *pkt;
s32 head0 = -1, head1 = -1;
__le64 *raw_rcd, done;
u32 head = rcq->head;
- u64 packets = 0;
done = (head & (rcq->size_mask + 1)) ? cpu_to_le64(FBNIC_RCD_DONE) : 0;
raw_rcd = &rcq->desc[head & rcq->size_mask];
@@ -780,9 +792,11 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
fbnic_populate_skb_fields(nv, rcd, skb, qt);
packets++;
+ bytes += skb->len;
napi_gro_receive(&nv->napi, skb);
} else {
+ dropped++;
fbnic_put_pkt_buff(nv, pkt, 1);
}
@@ -799,6 +813,14 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
}
}
+ u64_stats_update_begin(&rcq->stats.syncp);
+ rcq->stats.packets += packets;
+ rcq->stats.bytes += bytes;
+ /* Re-add ethernet header length (removed in fbnic_build_skb) */
+ rcq->stats.bytes += ETH_HLEN * packets;
+ rcq->stats.dropped += dropped;
+ u64_stats_update_end(&rcq->stats.syncp);
+
/* Unmap and free processed buffers */
if (head0 >= 0)
fbnic_clean_bdq(nv, budget, &qt->sub0, head0);
@@ -865,12 +887,36 @@ static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
+static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
+{
+ struct fbnic_queue_stats *stats = &rxr->stats;
+
+	/* Capture stats from queues before disassociating them */
+ fbn->rx_stats.bytes += stats->bytes;
+ fbn->rx_stats.packets += stats->packets;
+ fbn->rx_stats.dropped += stats->dropped;
+}
+
+static void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
+{
+ struct fbnic_queue_stats *stats = &txr->stats;
+
+	/* Capture stats from queues before disassociating them */
+ fbn->tx_stats.bytes += stats->bytes;
+ fbn->tx_stats.packets += stats->packets;
+ fbn->tx_stats.dropped += stats->dropped;
+}
+
static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
struct fbnic_ring *txr)
{
if (!(txr->flags & FBNIC_RING_F_STATS))
return;
+ fbnic_aggregate_ring_tx_counters(fbn, txr);
+
/* Remove pointer to the Tx ring */
WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr);
fbn->tx[txr->q_idx] = NULL;
@@ -882,6 +928,8 @@ static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
if (!(rxr->flags & FBNIC_RING_F_STATS))
return;
+ fbnic_aggregate_ring_rx_counters(fbn, rxr);
+
/* Remove pointer to the Rx ring */
WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr);
fbn->rx[rxr->q_idx] = NULL;
@@ -974,6 +1022,7 @@ static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
int q_idx, u8 flags)
{
+ u64_stats_init(&ring->stats.syncp);
ring->doorbell = doorbell;
ring->q_idx = q_idx;
ring->flags = flags;
@@ -1012,14 +1061,14 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
nv->fbd = fbd;
nv->v_idx = v_idx;
- /* Record IRQ to NAPI struct */
- netif_napi_set_irq(&nv->napi,
- pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx));
-
/* Tie napi to netdev */
list_add(&nv->napis, &fbn->napis);
netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll);
+ /* Record IRQ to NAPI struct */
+ netif_napi_set_irq(&nv->napi,
+ pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx));
+
/* Tie nv back to PCIe dev */
nv->dev = fbd->dev;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index 4a206c0e7192..2f91f68d11d5 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -7,6 +7,7 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
#include <net/xdp.h>
struct fbnic_net;
@@ -51,6 +52,13 @@ struct fbnic_pkt_buff {
u16 nr_frags;
};
+struct fbnic_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 dropped;
+ struct u64_stats_sync syncp;
+};
+
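+/* A minimal reader sketch (illustrative, mirroring the users added in
+ * fbnic_netdev.c): snapshots must be taken under the u64_stats seqcount
+ * so the 64-bit counters read consistently on 32-bit hosts.
+ *
+ *	unsigned int start;
+ *	u64 packets;
+ *
+ *	do {
+ *		start = u64_stats_fetch_begin(&stats->syncp);
+ *		packets = stats->packets;
+ *	} while (u64_stats_fetch_retry(&stats->syncp, start));
+ */
+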
/* Pagecnt bias is long max to reserve the last bit to catch overflow
* cases where if we overcharge the bias it will flip over to be negative.
*/
@@ -77,6 +85,8 @@ struct fbnic_ring {
u32 head, tail; /* Head/Tail of ring */
+ struct fbnic_queue_stats stats;
+
/* Slow path fields follow */
dma_addr_t dma; /* Phys addr of descriptor memory */
size_t size; /* Size of descriptor ring in memory */
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 43ba71e82260..ee046468652c 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -46,18 +46,21 @@ config LAN743X
tristate "LAN743x support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
- select PHYLIB
select FIXED_PHY
select CRC16
select CRC32
+ select PHYLINK
help
- Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip
+ Support for the Microchip LAN743x and PCI11x1x families of PCI
+ Express Ethernet devices
To compile this driver as a module, choose M here. The module will be
called lan743x.
+source "drivers/net/ethernet/microchip/lan865x/Kconfig"
source "drivers/net/ethernet/microchip/lan966x/Kconfig"
source "drivers/net/ethernet/microchip/sparx5/Kconfig"
source "drivers/net/ethernet/microchip/vcap/Kconfig"
+source "drivers/net/ethernet/microchip/fdma/Kconfig"
endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index bbd349264e6f..3c65baed9fd8 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -9,6 +9,8 @@ obj-$(CONFIG_LAN743X) += lan743x.o
lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
+obj-$(CONFIG_LAN865X) += lan865x/
obj-$(CONFIG_LAN966X_SWITCH) += lan966x/
obj-$(CONFIG_SPARX5_SWITCH) += sparx5/
obj-$(CONFIG_VCAP) += vcap/
+obj-$(CONFIG_FDMA) += fdma/
diff --git a/drivers/net/ethernet/microchip/fdma/Kconfig b/drivers/net/ethernet/microchip/fdma/Kconfig
new file mode 100644
index 000000000000..ec228c061351
--- /dev/null
+++ b/drivers/net/ethernet/microchip/fdma/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Microchip FDMA API configuration
+#
+
+if NET_VENDOR_MICROCHIP
+
+config FDMA
+ bool "FDMA API" if COMPILE_TEST
+ help
+ Provides the basic FDMA functionality for multiple Microchip
+ switchcores.
+
+ Say Y here if you want to build the FDMA API that provides a common
+ set of functions and data structures for interacting with the Frame
+	  DMA engine in multiple Microchip switchcores.
+
+endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/fdma/Makefile b/drivers/net/ethernet/microchip/fdma/Makefile
new file mode 100644
index 000000000000..cc9a736be357
--- /dev/null
+++ b/drivers/net/ethernet/microchip/fdma/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for Microchip FDMA
+#
+
+obj-$(CONFIG_FDMA) += fdma.o
+fdma-y += fdma_api.o
diff --git a/drivers/net/ethernet/microchip/fdma/fdma_api.c b/drivers/net/ethernet/microchip/fdma/fdma_api.c
new file mode 100644
index 000000000000..e78c3590da9e
--- /dev/null
+++ b/drivers/net/ethernet/microchip/fdma/fdma_api.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "fdma_api.h"
+
+#include <linux/bits.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+
+/* Add a DB to a DCB, providing a callback for getting the DB dataptr. */
+static int __fdma_db_add(struct fdma *fdma, int dcb_idx, int db_idx, u64 status,
+ int (*cb)(struct fdma *fdma, int dcb_idx,
+ int db_idx, u64 *dataptr))
+{
+ struct fdma_db *db = fdma_db_get(fdma, dcb_idx, db_idx);
+
+ db->status = status;
+
+ return cb(fdma, dcb_idx, db_idx, &db->dataptr);
+}
+
+/* Add a DB to a DCB, using the callback set in the fdma_ops struct. */
+int fdma_db_add(struct fdma *fdma, int dcb_idx, int db_idx, u64 status)
+{
+ return __fdma_db_add(fdma,
+ dcb_idx,
+ db_idx,
+ status,
+ fdma->ops.dataptr_cb);
+}
+
+/* Add a DCB with callbacks for getting the DB dataptr and the DCB nextptr. */
+int __fdma_dcb_add(struct fdma *fdma, int dcb_idx, u64 info, u64 status,
+ int (*dcb_cb)(struct fdma *fdma, int dcb_idx, u64 *nextptr),
+ int (*db_cb)(struct fdma *fdma, int dcb_idx, int db_idx,
+ u64 *dataptr))
+{
+ struct fdma_dcb *dcb = fdma_dcb_get(fdma, dcb_idx);
+ int i, err;
+
+ for (i = 0; i < fdma->n_dbs; i++) {
+ err = __fdma_db_add(fdma, dcb_idx, i, status, db_cb);
+ if (unlikely(err))
+ return err;
+ }
+
+ err = dcb_cb(fdma, dcb_idx, &fdma->last_dcb->nextptr);
+ if (unlikely(err))
+ return err;
+
+ fdma->last_dcb = dcb;
+
+ dcb->nextptr = FDMA_DCB_INVALID_DATA;
+ dcb->info = info;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__fdma_dcb_add);
+
+/* Add a DCB, using the preset callbacks in the fdma_ops struct. */
+int fdma_dcb_add(struct fdma *fdma, int dcb_idx, u64 info, u64 status)
+{
+ return __fdma_dcb_add(fdma,
+ dcb_idx,
+ info, status,
+ fdma->ops.nextptr_cb,
+ fdma->ops.dataptr_cb);
+}
+EXPORT_SYMBOL_GPL(fdma_dcb_add);
+
+/* Initialize the DCB's and DB's. */
+int fdma_dcbs_init(struct fdma *fdma, u64 info, u64 status)
+{
+ int i, err;
+
+ fdma->last_dcb = fdma->dcbs;
+ fdma->db_index = 0;
+ fdma->dcb_index = 0;
+
+ for (i = 0; i < fdma->n_dcbs; i++) {
+ err = fdma_dcb_add(fdma, i, info, status);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fdma_dcbs_init);
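+
+/* Typical setup sketch (illustrative; the fdma/dev instances and
+ * my_dataptr_cb are the caller's, not part of this API):
+ *
+ *	fdma->n_dcbs = 64;
+ *	fdma->n_dbs = 1;
+ *	fdma->db_size = 2048;
+ *	fdma->ops.nextptr_cb = fdma_nextptr_cb;
+ *	fdma->ops.dataptr_cb = my_dataptr_cb;
+ *	fdma->size = fdma_get_size_contiguous(fdma);
+ *
+ *	err = fdma_alloc_coherent(dev, fdma);
+ *	if (!err)
+ *		err = fdma_dcbs_init(fdma,
+ *				     FDMA_DCB_INFO_DATAL(fdma->db_size),
+ *				     FDMA_DCB_STATUS_INTR);
+ */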
+
+/* Allocate coherent DMA memory for FDMA. */
+int fdma_alloc_coherent(struct device *dev, struct fdma *fdma)
+{
+ fdma->dcbs = dma_alloc_coherent(dev,
+ fdma->size,
+ &fdma->dma,
+ GFP_KERNEL);
+ if (!fdma->dcbs)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fdma_alloc_coherent);
+
+/* Allocate physical memory for FDMA. */
+int fdma_alloc_phys(struct fdma *fdma)
+{
+ fdma->dcbs = kzalloc(fdma->size, GFP_KERNEL);
+ if (!fdma->dcbs)
+ return -ENOMEM;
+
+ fdma->dma = virt_to_phys(fdma->dcbs);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fdma_alloc_phys);
+
+/* Free coherent DMA memory. */
+void fdma_free_coherent(struct device *dev, struct fdma *fdma)
+{
+ dma_free_coherent(dev, fdma->size, fdma->dcbs, fdma->dma);
+}
+EXPORT_SYMBOL_GPL(fdma_free_coherent);
+
+/* Free virtual memory. */
+void fdma_free_phys(struct fdma *fdma)
+{
+ kfree(fdma->dcbs);
+}
+EXPORT_SYMBOL_GPL(fdma_free_phys);
+
+/* Get the size of the FDMA DCB memory. */
+u32 fdma_get_size(struct fdma *fdma)
+{
+ return ALIGN(sizeof(struct fdma_dcb) * fdma->n_dcbs, PAGE_SIZE);
+}
+EXPORT_SYMBOL_GPL(fdma_get_size);
+
+/* Get the total size of the FDMA DCB and DB memory. This function is only
+ * applicable if the dataptr addresses and DCB's are in contiguous memory.
+ */
+u32 fdma_get_size_contiguous(struct fdma *fdma)
+{
+ return ALIGN(fdma->n_dcbs * sizeof(struct fdma_dcb) +
+ fdma->n_dcbs * fdma->n_dbs * fdma->db_size,
+ PAGE_SIZE);
+}
+EXPORT_SYMBOL_GPL(fdma_get_size_contiguous);
diff --git a/drivers/net/ethernet/microchip/fdma/fdma_api.h b/drivers/net/ethernet/microchip/fdma/fdma_api.h
new file mode 100644
index 000000000000..d91affe8bd98
--- /dev/null
+++ b/drivers/net/ethernet/microchip/fdma/fdma_api.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _FDMA_API_H_
+#define _FDMA_API_H_
+
+#include <linux/bits.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+
+/* This provides a common set of functions and data structures for interacting
+ * with the Frame DMA engine on multiple Microchip switchcores.
+ *
+ * Frame DMA DCB format:
+ *
+ * +---------------------------+
+ * | Next Ptr |
+ * +---------------------------+
+ * | Reserved | Info |
+ * +---------------------------+
+ * | Data0 Ptr |
+ * +---------------------------+
+ * | Reserved | Status0 |
+ * +---------------------------+
+ * | Data1 Ptr |
+ * +---------------------------+
+ * | Reserved | Status1 |
+ * +---------------------------+
+ * | Data2 Ptr |
+ * +---------------------------+
+ * | Reserved | Status2 |
+ * +---------------------------+
+ * |            ...            |
+ * |            ...            |
+ * |            ...            |
+ * +---------------------------+
+ * |         Data14 Ptr        |
+ * +---------------------------+
+ * |  Reserved   |  Status14   |
+ * +---------------------------+
+ *
+ * The data pointers point to the actual frame data to be received or sent.
+ * The addresses of the data pointers can, as of writing, be a DMA address, a
+ * physical address or a mapped address.
+ *
+ */
+
+#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
+#define FDMA_DCB_INFO_TOKEN BIT(17)
+#define FDMA_DCB_INFO_INTR BIT(18)
+#define FDMA_DCB_INFO_SW(x) (((x) << 24) & GENMASK(31, 24))
+
+#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
+#define FDMA_DCB_STATUS_SOF BIT(16)
+#define FDMA_DCB_STATUS_EOF BIT(17)
+#define FDMA_DCB_STATUS_INTR BIT(18)
+#define FDMA_DCB_STATUS_DONE BIT(19)
+#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
+#define FDMA_DCB_INVALID_DATA 0x1
+
+#define FDMA_DB_MAX 15 /* Max number of DB's on Sparx5 */
+
+struct fdma;
+
+struct fdma_db {
+ u64 dataptr;
+ u64 status;
+};
+
+struct fdma_dcb {
+ u64 nextptr;
+ u64 info;
+ struct fdma_db db[FDMA_DB_MAX];
+};
+
+struct fdma_ops {
+ /* User-provided callback to set the dataptr */
+ int (*dataptr_cb)(struct fdma *fdma, int dcb_idx, int db_idx, u64 *ptr);
+ /* User-provided callback to set the nextptr */
+ int (*nextptr_cb)(struct fdma *fdma, int dcb_idx, u64 *ptr);
+};
+
+struct fdma {
+ void *priv;
+
+ /* Virtual addresses */
+ struct fdma_dcb *dcbs;
+ struct fdma_dcb *last_dcb;
+
+ /* DMA address */
+ dma_addr_t dma;
+
+ /* Size of DCB + DB memory */
+ int size;
+
+ /* Indexes used to access the next-to-be-used DCB or DB */
+ int db_index;
+ int dcb_index;
+
+ /* Number of DCB's and DB's */
+ u32 n_dcbs;
+ u32 n_dbs;
+
+ /* Size of DB's */
+ u32 db_size;
+
+ /* Channel id this FDMA object operates on */
+ u32 channel_id;
+
+ struct fdma_ops ops;
+};
+
+/* Advance the DCB index and wrap if required. */
+static inline void fdma_dcb_advance(struct fdma *fdma)
+{
+ fdma->dcb_index++;
+ if (fdma->dcb_index >= fdma->n_dcbs)
+ fdma->dcb_index = 0;
+}
+
+/* Advance the DB index. */
+static inline void fdma_db_advance(struct fdma *fdma)
+{
+ fdma->db_index++;
+}
+
+/* Reset the DB index to zero. */
+static inline void fdma_db_reset(struct fdma *fdma)
+{
+ fdma->db_index = 0;
+}
+
+/* Check if a DCB can be reused in case of multiple DB's per DCB. */
+static inline bool fdma_dcb_is_reusable(struct fdma *fdma)
+{
+ return fdma->db_index != fdma->n_dbs;
+}
+
+/* Check if the FDMA has marked this DB as done. */
+static inline bool fdma_db_is_done(struct fdma_db *db)
+{
+ return db->status & FDMA_DCB_STATUS_DONE;
+}
+
+/* Get the length of a DB. */
+static inline int fdma_db_len_get(struct fdma_db *db)
+{
+ return FDMA_DCB_STATUS_BLOCKL(db->status);
+}
+
+/* Set the length of a DB. */
+static inline void fdma_dcb_len_set(struct fdma_dcb *dcb, u32 len)
+{
+ dcb->info = FDMA_DCB_INFO_DATAL(len);
+}
+
+/* Get a DB by index. */
+static inline struct fdma_db *fdma_db_get(struct fdma *fdma, int dcb_idx,
+ int db_idx)
+{
+ return &fdma->dcbs[dcb_idx].db[db_idx];
+}
+
+/* Get the next DB. */
+static inline struct fdma_db *fdma_db_next_get(struct fdma *fdma)
+{
+ return fdma_db_get(fdma, fdma->dcb_index, fdma->db_index);
+}
+
+/* Get a DCB by index. */
+static inline struct fdma_dcb *fdma_dcb_get(struct fdma *fdma, int dcb_idx)
+{
+ return &fdma->dcbs[dcb_idx];
+}
+
+/* Get the next DCB. */
+static inline struct fdma_dcb *fdma_dcb_next_get(struct fdma *fdma)
+{
+ return fdma_dcb_get(fdma, fdma->dcb_index);
+}
+
+/* Check if the FDMA has frames ready for extraction. */
+static inline bool fdma_has_frames(struct fdma *fdma)
+{
+ return fdma_db_is_done(fdma_db_next_get(fdma));
+}
+
+/* Get a nextptr by index. */
+static inline int fdma_nextptr_cb(struct fdma *fdma, int dcb_idx, u64 *nextptr)
+{
+ *nextptr = fdma->dma + (sizeof(struct fdma_dcb) * dcb_idx);
+ return 0;
+}
+
+/* Get the DMA address of a dataptr, by index. This function is only applicable
+ * if the dataptr addresses and DCB's are in contiguous memory and the driver
+ * supports XDP.
+ */
+static inline u64 fdma_dataptr_get_contiguous(struct fdma *fdma, int dcb_idx,
+ int db_idx)
+{
+ return fdma->dma + (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
+ (dcb_idx * fdma->n_dbs + db_idx) * fdma->db_size +
+ XDP_PACKET_HEADROOM;
+}
+
+/* Get the virtual address of a dataptr, by index. This function is only
+ * applicable if the dataptr addresses and DCB's are in contiguous memory and
+ * the driver supports XDP.
+ */
+static inline void *fdma_dataptr_virt_get_contiguous(struct fdma *fdma,
+ int dcb_idx, int db_idx)
+{
+ return (u8 *)fdma->dcbs + (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
+ (dcb_idx * fdma->n_dbs + db_idx) * fdma->db_size +
+ XDP_PACKET_HEADROOM;
+}
+
+/* Check if this DCB is the last used DCB. */
+static inline bool fdma_is_last(struct fdma *fdma, struct fdma_dcb *dcb)
+{
+ return dcb == fdma->last_dcb;
+}
+
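+/* Illustrative RX consumption sketch (frame hand-off elided): drain the
+ * completed DBs, moving to the next DCB once all of its DBs are used.
+ *
+ *	while (fdma_has_frames(fdma)) {
+ *		struct fdma_db *db = fdma_db_next_get(fdma);
+ *		int len = fdma_db_len_get(db);
+ *
+ *		// ... pass the 'len' byte frame up the stack ...
+ *
+ *		fdma_db_advance(fdma);
+ *		if (fdma_dcb_is_reusable(fdma))
+ *			continue;
+ *		fdma_db_reset(fdma);
+ *		fdma_dcb_advance(fdma);
+ *	}
+ */
+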
+int fdma_dcbs_init(struct fdma *fdma, u64 info, u64 status);
+int fdma_db_add(struct fdma *fdma, int dcb_idx, int db_idx, u64 status);
+int fdma_dcb_add(struct fdma *fdma, int dcb_idx, u64 info, u64 status);
+int __fdma_dcb_add(struct fdma *fdma, int dcb_idx, u64 info, u64 status,
+ int (*dcb_cb)(struct fdma *fdma, int dcb_idx, u64 *nextptr),
+ int (*db_cb)(struct fdma *fdma, int dcb_idx, int db_idx,
+ u64 *dataptr));
+
+int fdma_alloc_coherent(struct device *dev, struct fdma *fdma);
+int fdma_alloc_phys(struct fdma *fdma);
+
+void fdma_free_coherent(struct device *dev, struct fdma *fdma);
+void fdma_free_phys(struct fdma *fdma);
+
+u32 fdma_get_size(struct fdma *fdma);
+u32 fdma_get_size_contiguous(struct fdma *fdma);
+
+#endif
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 3a63ec091413..1a1cbd034eda 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1034,16 +1034,12 @@ static int lan743x_ethtool_get_ts_info(struct net_device *netdev,
struct lan743x_adapter *adapter = netdev_priv(netdev);
ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (adapter->ptp.ptp_clock)
ts_info->phc_index = ptp_clock_index(adapter->ptp.ptp_clock);
- else
- ts_info->phc_index = -1;
ts_info->tx_types = BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON) |
@@ -1058,61 +1054,55 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
struct ethtool_keee *eee)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- struct phy_device *phydev = netdev->phydev;
- u32 buf;
- int ret;
-
- if (!phydev)
- return -EIO;
- if (!phydev->drv) {
- netif_err(adapter, drv, adapter->netdev,
- "Missing PHY Driver\n");
- return -EIO;
- }
- ret = phy_ethtool_get_eee(phydev, eee);
- if (ret < 0)
- return ret;
+ eee->tx_lpi_timer = lan743x_csr_read(adapter,
+ MAC_EEE_TX_LPI_REQ_DLY_CNT);
- buf = lan743x_csr_read(adapter, MAC_CR);
- if (buf & MAC_CR_EEE_EN_) {
- /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
- buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
- eee->tx_lpi_timer = buf;
- } else {
- eee->tx_lpi_timer = 0;
- }
-
- return 0;
+ return phylink_ethtool_get_eee(adapter->phylink, eee);
}
static int lan743x_ethtool_set_eee(struct net_device *netdev,
struct ethtool_keee *eee)
{
- struct lan743x_adapter *adapter;
- struct phy_device *phydev;
- u32 buf = 0;
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u32 tx_lpi_timer;
- if (!netdev)
- return -EINVAL;
- adapter = netdev_priv(netdev);
- if (!adapter)
- return -EINVAL;
- phydev = netdev->phydev;
- if (!phydev)
- return -EIO;
- if (!phydev->drv) {
- netif_err(adapter, drv, adapter->netdev,
- "Missing PHY Driver\n");
- return -EIO;
- }
+ tx_lpi_timer = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
+ if (tx_lpi_timer != eee->tx_lpi_timer) {
+ u32 mac_cr = lan743x_csr_read(adapter, MAC_CR);
+
+ /* Software should only change this field when Energy Efficient
+ * Ethernet Enable (EEEEN) is cleared.
+		 * Clearing it triggers an autonegotiation restart, and EEE
+		 * will be re-enabled during link-up if it was negotiated.
+ */
+ lan743x_mac_eee_enable(adapter, false);
+ lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT,
+ eee->tx_lpi_timer);
- if (eee->eee_enabled) {
- buf = (u32)eee->tx_lpi_timer;
- lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, buf);
+ if (mac_cr & MAC_CR_EEE_EN_)
+ lan743x_mac_eee_enable(adapter, true);
}
- return phy_ethtool_set_eee(phydev, eee);
+ return phylink_ethtool_set_eee(adapter->phylink, eee);
+}
+
+static int
+lan743x_ethtool_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_set(adapter->phylink, cmd);
+}
+
+static int
+lan743x_ethtool_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_get(adapter->phylink, cmd);
}
#ifdef CONFIG_PM
@@ -1124,8 +1114,7 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
wol->supported = 0;
wol->wolopts = 0;
- if (netdev->phydev)
- phy_ethtool_get_wol(netdev->phydev, wol);
+ phylink_ethtool_get_wol(adapter->phylink, wol);
if (wol->supported != adapter->phy_wol_supported)
netif_warn(adapter, drv, adapter->netdev,
@@ -1166,7 +1155,7 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
!(adapter->phy_wol_supported & WAKE_MAGICSECURE))
phy_wol.wolopts &= ~WAKE_MAGIC;
- ret = phy_ethtool_set_wol(netdev->phydev, &phy_wol);
+ ret = phylink_ethtool_set_wol(adapter->phylink, wol);
if (ret && (ret != -EOPNOTSUPP))
return ret;
@@ -1355,44 +1344,16 @@ static void lan743x_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct lan743x_adapter *adapter = netdev_priv(dev);
- struct lan743x_phy *phy = &adapter->phy;
- if (phy->fc_request_control & FLOW_CTRL_TX)
- pause->tx_pause = 1;
- if (phy->fc_request_control & FLOW_CTRL_RX)
- pause->rx_pause = 1;
- pause->autoneg = phy->fc_autoneg;
+ phylink_ethtool_get_pauseparam(adapter->phylink, pause);
}
static int lan743x_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct lan743x_adapter *adapter = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- struct lan743x_phy *phy = &adapter->phy;
-
- if (!phydev)
- return -ENODEV;
-
- if (!phy_validate_pause(phydev, pause))
- return -EINVAL;
-
- phy->fc_request_control = 0;
- if (pause->rx_pause)
- phy->fc_request_control |= FLOW_CTRL_RX;
- if (pause->tx_pause)
- phy->fc_request_control |= FLOW_CTRL_TX;
-
- phy->fc_autoneg = pause->autoneg;
-
- if (pause->autoneg == AUTONEG_DISABLE)
- lan743x_mac_flow_ctrl_set_enables(adapter, pause->tx_pause,
- pause->rx_pause);
- else
- phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
-
- return 0;
+ return phylink_ethtool_set_pauseparam(adapter->phylink, pause);
}
const struct ethtool_ops lan743x_ethtool_ops = {
@@ -1417,8 +1378,8 @@ const struct ethtool_ops lan743x_ethtool_ops = {
.get_ts_info = lan743x_ethtool_get_ts_info,
.get_eee = lan743x_ethtool_get_eee,
.set_eee = lan743x_ethtool_set_eee,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = lan743x_ethtool_get_link_ksettings,
+ .set_link_ksettings = lan743x_ethtool_set_link_ksettings,
.get_regs_len = lan743x_get_regs_len,
.get_regs = lan743x_get_regs,
.get_pauseparam = lan743x_get_pauseparam,
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index e418539565b1..4dc5adcda6a3 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -15,6 +15,7 @@
#include <linux/rtnetlink.h>
#include <linux/iopoll.h>
#include <linux/crc16.h>
+#include <linux/phylink.h>
#include "lan743x_main.h"
#include "lan743x_ethtool.h"
@@ -992,6 +993,42 @@ static int lan743x_sgmii_write(struct lan743x_adapter *adapter,
return ret;
}
+static int lan743x_get_lsd(int speed, int duplex, u8 mss)
+{
+ int lsd;
+
+ switch (speed) {
+ case SPEED_2500:
+ if (mss == MASTER_SLAVE_STATE_SLAVE)
+ lsd = LINK_2500_SLAVE;
+ else
+ lsd = LINK_2500_MASTER;
+ break;
+ case SPEED_1000:
+ if (mss == MASTER_SLAVE_STATE_SLAVE)
+ lsd = LINK_1000_SLAVE;
+ else
+ lsd = LINK_1000_MASTER;
+ break;
+ case SPEED_100:
+ if (duplex == DUPLEX_FULL)
+ lsd = LINK_100FD;
+ else
+ lsd = LINK_100HD;
+ break;
+ case SPEED_10:
+ if (duplex == DUPLEX_FULL)
+ lsd = LINK_10FD;
+ else
+ lsd = LINK_10HD;
+ break;
+ default:
+ lsd = -EINVAL;
+ }
+
+ return lsd;
+}
+
static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter,
u16 baud)
{
@@ -1041,26 +1078,7 @@ static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter,
VR_MII_BAUD_RATE_1P25GBPS);
}
-static int lan743x_is_sgmii_2_5G_mode(struct lan743x_adapter *adapter,
- bool *status)
-{
- int ret;
-
- ret = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
- VR_MII_GEN2_4_MPLL_CTRL1);
- if (ret < 0)
- return ret;
-
- if (ret == VR_MII_MPLL_MULTIPLIER_125 ||
- ret == VR_MII_MPLL_MULTIPLIER_50)
- *status = true;
- else
- *status = false;
-
- return 0;
-}
-
-static int lan743x_sgmii_aneg_update(struct lan743x_adapter *adapter)
+static int lan743x_serdes_clock_and_aneg_update(struct lan743x_adapter *adapter)
{
enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd;
int mii_ctrl;
@@ -1147,68 +1165,11 @@ static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state)
return 0;
}
-static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
+static int lan743x_pcs_power_reset(struct lan743x_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- struct phy_device *phydev = netdev->phydev;
- enum lan743x_sgmii_lsd lsd = POWER_DOWN;
int mii_ctl;
- bool status;
int ret;
- switch (phydev->speed) {
- case SPEED_2500:
- if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
- lsd = LINK_2500_MASTER;
- else
- lsd = LINK_2500_SLAVE;
- break;
- case SPEED_1000:
- if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
- lsd = LINK_1000_MASTER;
- else
- lsd = LINK_1000_SLAVE;
- break;
- case SPEED_100:
- if (phydev->duplex)
- lsd = LINK_100FD;
- else
- lsd = LINK_100HD;
- break;
- case SPEED_10:
- if (phydev->duplex)
- lsd = LINK_10FD;
- else
- lsd = LINK_10HD;
- break;
- default:
- netif_err(adapter, drv, adapter->netdev,
- "Invalid speed %d\n", phydev->speed);
- return -EINVAL;
- }
-
- adapter->sgmii_lsd = lsd;
- ret = lan743x_sgmii_aneg_update(adapter);
- if (ret < 0) {
- netif_err(adapter, drv, adapter->netdev,
- "error %d SGMII cfg failed\n", ret);
- return ret;
- }
-
- ret = lan743x_is_sgmii_2_5G_mode(adapter, &status);
- if (ret < 0) {
- netif_err(adapter, drv, adapter->netdev,
- "error %d SGMII get mode failed\n", ret);
- return ret;
- }
-
- if (status)
- netif_dbg(adapter, drv, adapter->netdev,
- "SGMII 2.5G mode enable\n");
- else
- netif_dbg(adapter, drv, adapter->netdev,
- "SGMII 1G mode enable\n");
-
/* SGMII/1000/2500BASE-X PCS power down */
mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
if (mii_ctl < 0)
@@ -1229,11 +1190,7 @@ static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
if (ret < 0)
return ret;
- ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
- if (ret < 0)
- return ret;
-
- return 0;
+ return lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
}
static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
@@ -1389,103 +1346,11 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter)
50000, 1000000);
}
-static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
- u16 local_adv, u16 remote_adv)
-{
- struct lan743x_phy *phy = &adapter->phy;
- u8 cap;
-
- if (phy->fc_autoneg)
- cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
- else
- cap = phy->fc_request_control;
-
- lan743x_mac_flow_ctrl_set_enables(adapter,
- cap & FLOW_CTRL_TX,
- cap & FLOW_CTRL_RX);
-}
-
static int lan743x_phy_init(struct lan743x_adapter *adapter)
{
return lan743x_phy_reset(adapter);
}
-static void lan743x_phy_link_status_change(struct net_device *netdev)
-{
- struct lan743x_adapter *adapter = netdev_priv(netdev);
- struct phy_device *phydev = netdev->phydev;
- u32 data;
-
- phy_print_status(phydev);
- if (phydev->state == PHY_RUNNING) {
- int remote_advertisement = 0;
- int local_advertisement = 0;
-
- data = lan743x_csr_read(adapter, MAC_CR);
-
- /* set duplex mode */
- if (phydev->duplex)
- data |= MAC_CR_DPX_;
- else
- data &= ~MAC_CR_DPX_;
-
- /* set bus speed */
- switch (phydev->speed) {
- case SPEED_10:
- data &= ~MAC_CR_CFG_H_;
- data &= ~MAC_CR_CFG_L_;
- break;
- case SPEED_100:
- data &= ~MAC_CR_CFG_H_;
- data |= MAC_CR_CFG_L_;
- break;
- case SPEED_1000:
- data |= MAC_CR_CFG_H_;
- data &= ~MAC_CR_CFG_L_;
- break;
- case SPEED_2500:
- data |= MAC_CR_CFG_H_;
- data |= MAC_CR_CFG_L_;
- break;
- }
- lan743x_csr_write(adapter, MAC_CR, data);
-
- local_advertisement =
- linkmode_adv_to_mii_adv_t(phydev->advertising);
- remote_advertisement =
- linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
-
- lan743x_phy_update_flowcontrol(adapter, local_advertisement,
- remote_advertisement);
- lan743x_ptp_update_latency(adapter, phydev->speed);
- if (phydev->interface == PHY_INTERFACE_MODE_SGMII ||
- phydev->interface == PHY_INTERFACE_MODE_1000BASEX ||
- phydev->interface == PHY_INTERFACE_MODE_2500BASEX)
- lan743x_sgmii_config(adapter);
-
- data = lan743x_csr_read(adapter, MAC_CR);
- if (phydev->enable_tx_lpi)
- data |= MAC_CR_EEE_EN_;
- else
- data &= ~MAC_CR_EEE_EN_;
- lan743x_csr_write(adapter, MAC_CR, data);
- }
-}
-
-static void lan743x_phy_close(struct lan743x_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct phy_device *phydev = netdev->phydev;
-
- phy_stop(netdev->phydev);
- phy_disconnect(netdev->phydev);
-
- /* using phydev here as phy_disconnect NULLs netdev->phydev */
- if (phy_is_pseudo_fixed_link(phydev))
- fixed_phy_unregister(phydev);
-
-}
-
static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
{
u32 id_rev;
@@ -1502,65 +1367,9 @@ static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
adapter->phy_interface = PHY_INTERFACE_MODE_MII;
else
adapter->phy_interface = PHY_INTERFACE_MODE_RGMII;
-}
-
-static int lan743x_phy_open(struct lan743x_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct lan743x_phy *phy = &adapter->phy;
- struct fixed_phy_status fphy_status = {
- .link = 1,
- .speed = SPEED_1000,
- .duplex = DUPLEX_FULL,
- };
- struct phy_device *phydev;
- int ret = -EIO;
-
- /* try devicetree phy, or fixed link */
- phydev = of_phy_get_and_connect(netdev, adapter->pdev->dev.of_node,
- lan743x_phy_link_status_change);
-
- if (!phydev) {
- /* try internal phy */
- phydev = phy_find_first(adapter->mdiobus);
- if (!phydev) {
- if ((adapter->csr.id_rev & ID_REV_ID_MASK_) ==
- ID_REV_ID_LAN7431_) {
- phydev = fixed_phy_register(PHY_POLL,
- &fphy_status, NULL);
- if (IS_ERR(phydev)) {
- netdev_err(netdev, "No PHY/fixed_PHY found\n");
- return PTR_ERR(phydev);
- }
- } else {
- goto return_error;
- }
- }
-
- lan743x_phy_interface_select(adapter);
-
- ret = phy_connect_direct(netdev, phydev,
- lan743x_phy_link_status_change,
- adapter->phy_interface);
- if (ret)
- goto return_error;
- }
-
- /* MAC doesn't support 1000T Half */
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-
- /* support both flow controls */
- phy_support_asym_pause(phydev);
- phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
- phy->fc_autoneg = phydev->autoneg;
-
- phy_start(phydev);
- phy_start_aneg(phydev);
- phy_attached_info(phydev);
- return 0;
-return_error:
- return ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "selected phy interface: 0x%X\n", adapter->phy_interface);
}
static void lan743x_rfe_open(struct lan743x_adapter *adapter)
@@ -3061,6 +2870,336 @@ return_error:
return ret;
}
+static int lan743x_phylink_sgmii_config(struct lan743x_adapter *adapter)
+{
+ u32 sgmii_ctl;
+ int ret;
+
+ ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL,
+ MASTER_SLAVE_STATE_MASTER);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+			  "error %d: invalid link-speed-duplex (LSD)\n", ret);
+ return ret;
+ }
+
+ adapter->sgmii_lsd = ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
+
+ /* LINK_STATUS_SOURCE from the External PHY via SGMII */
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl &= ~SGMII_CTL_LINK_STATUS_SOURCE_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+
+ ret = lan743x_serdes_clock_and_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d sgmii aneg update failed\n", ret);
+ return ret;
+ }
+
+ return lan743x_pcs_power_reset(adapter);
+}
+
+static int lan743x_phylink_1000basex_config(struct lan743x_adapter *adapter)
+{
+ u32 sgmii_ctl;
+ int ret;
+
+ ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL,
+ MASTER_SLAVE_STATE_MASTER);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+			  "error %d: invalid link-speed-duplex (LSD)\n", ret);
+ return ret;
+ }
+
+ adapter->sgmii_lsd = ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
+
+ /* LINK_STATUS_SOURCE from 1000BASE-X PCS link status */
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+
+ ret = lan743x_serdes_clock_and_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d 1000basex aneg update failed\n", ret);
+ return ret;
+ }
+
+ return lan743x_pcs_power_reset(adapter);
+}
+
+static int lan743x_phylink_2500basex_config(struct lan743x_adapter *adapter)
+{
+ u32 sgmii_ctl;
+ int ret;
+
+ ret = lan743x_get_lsd(SPEED_2500, DUPLEX_FULL,
+ MASTER_SLAVE_STATE_MASTER);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+			  "error %d: invalid link-speed-duplex (LSD)\n", ret);
+ return ret;
+ }
+
+ adapter->sgmii_lsd = ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
+
+ /* LINK_STATUS_SOURCE from 2500BASE-X PCS link status */
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+
+ ret = lan743x_serdes_clock_and_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d 2500basex aneg update failed\n", ret);
+ return ret;
+ }
+
+ return lan743x_pcs_power_reset(adapter);
+}
+
+void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable)
+{
+ u32 mac_cr;
+
+ mac_cr = lan743x_csr_read(adapter, MAC_CR);
+ if (enable)
+ mac_cr |= MAC_CR_EEE_EN_;
+ else
+ mac_cr &= ~MAC_CR_EEE_EN_;
+ lan743x_csr_write(adapter, MAC_CR, mac_cr);
+}
+
+static void lan743x_phylink_mac_config(struct phylink_config *config,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_2500BASEX:
+ ret = lan743x_phylink_2500basex_config(adapter);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "2500BASEX config failed. Error %d\n", ret);
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "2500BASEX mode selected and configured\n");
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ ret = lan743x_phylink_1000basex_config(adapter);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "1000BASEX config failed. Error %d\n", ret);
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "1000BASEX mode selected and configured\n");
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ ret = lan743x_phylink_sgmii_config(adapter);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "SGMII config failed. Error %d\n", ret);
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII mode selected and configured\n");
+ break;
+ default:
+ netif_dbg(adapter, drv, adapter->netdev,
+			  "RGMII/GMII/MII (0x%X) mode enabled\n",
+ state->interface);
+ break;
+ }
+}
+
+static void lan743x_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int link_an_mode,
+ phy_interface_t interface)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ netif_tx_stop_all_queues(to_net_dev(config->dev));
+ lan743x_mac_eee_enable(adapter, false);
+}
+
+static void lan743x_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int link_an_mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int mac_cr;
+ u8 cap;
+
+ mac_cr = lan743x_csr_read(adapter, MAC_CR);
+ /* Pre-initialize register bits.
+ * Resulting value corresponds to SPEED_10
+ */
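+	/* (CFG_H, CFG_L) encodes the link speed: 0/0 = 10M, 0/1 = 100M,
+	 * 1/0 = 1000M, 1/1 = 2500M, matching the assignments below.
+	 */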
+ mac_cr &= ~(MAC_CR_CFG_H_ | MAC_CR_CFG_L_);
+ if (speed == SPEED_2500)
+ mac_cr |= MAC_CR_CFG_H_ | MAC_CR_CFG_L_;
+ else if (speed == SPEED_1000)
+ mac_cr |= MAC_CR_CFG_H_;
+ else if (speed == SPEED_100)
+ mac_cr |= MAC_CR_CFG_L_;
+
+ lan743x_csr_write(adapter, MAC_CR, mac_cr);
+
+ lan743x_ptp_update_latency(adapter, speed);
+
+ /* Flow Control operation */
+ cap = 0;
+ if (tx_pause)
+ cap |= FLOW_CTRL_TX;
+ if (rx_pause)
+ cap |= FLOW_CTRL_RX;
+
+ lan743x_mac_flow_ctrl_set_enables(adapter,
+ cap & FLOW_CTRL_TX,
+ cap & FLOW_CTRL_RX);
+
+ if (phydev)
+ lan743x_mac_eee_enable(adapter, phydev->enable_tx_lpi);
+
+ netif_tx_wake_all_queues(netdev);
+}
+
+static const struct phylink_mac_ops lan743x_phylink_mac_ops = {
+ .mac_config = lan743x_phylink_mac_config,
+ .mac_link_down = lan743x_phylink_mac_link_down,
+ .mac_link_up = lan743x_phylink_mac_link_up,
+};
+
+static int lan743x_phylink_create(struct lan743x_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct phylink *pl;
+
+ adapter->phylink_config.dev = &netdev->dev;
+ adapter->phylink_config.type = PHYLINK_NETDEV;
+ adapter->phylink_config.mac_managed_pm = false;
+
+ adapter->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+
+ lan743x_phy_interface_select(adapter);
+
+ switch (adapter->phy_interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ adapter->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ adapter->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ adapter->phylink_config.supported_interfaces);
+ adapter->phylink_config.mac_capabilities |= MAC_2500FD;
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ adapter->phylink_config.supported_interfaces);
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ adapter->phylink_config.supported_interfaces);
+ break;
+ default:
+ phy_interface_set_rgmii(adapter->phylink_config.supported_interfaces);
+ }
+
+ pl = phylink_create(&adapter->phylink_config, NULL,
+ adapter->phy_interface, &lan743x_phylink_mac_ops);
+
+ if (IS_ERR(pl)) {
+ netdev_err(netdev, "Could not create phylink (%pe)\n", pl);
+ return PTR_ERR(pl);
+ }
+
+ adapter->phylink = pl;
+	netdev_dbg(netdev, "lan743x phylink created\n");
+
+ return 0;
+}
+
+static bool lan743x_phy_handle_exists(struct device_node *dn)
+{
+ dn = of_parse_phandle(dn, "phy-handle", 0);
+ of_node_put(dn);
+ return dn != NULL;
+}
+
+static int lan743x_phylink_connect(struct lan743x_adapter *adapter)
+{
+ struct device_node *dn = adapter->pdev->dev.of_node;
+ struct net_device *dev = adapter->netdev;
+ struct phy_device *phydev;
+ int ret;
+
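+	/* Attach order: a devicetree phy-handle first, then the first PHY
+	 * on the internal MDIO bus, and finally a fixed link for
+	 * LAN7431/PCI11x1x parts with no PHY attached at all.
+	 */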
+ if (dn)
+ ret = phylink_of_phy_connect(adapter->phylink, dn, 0);
+
+ if (!dn || (ret && !lan743x_phy_handle_exists(dn))) {
+ phydev = phy_find_first(adapter->mdiobus);
+ if (phydev) {
+ /* attach the mac to the phy */
+ ret = phylink_connect_phy(adapter->phylink, phydev);
+ } else if (((adapter->csr.id_rev & ID_REV_ID_MASK_) ==
+ ID_REV_ID_LAN7431_) || adapter->is_pci11x1x) {
+ struct phylink_link_state state;
+ unsigned long caps;
+
+ caps = adapter->phylink_config.mac_capabilities;
+ if (caps & MAC_2500FD) {
+ state.speed = SPEED_2500;
+ state.duplex = DUPLEX_FULL;
+ } else if (caps & MAC_1000FD) {
+ state.speed = SPEED_1000;
+ state.duplex = DUPLEX_FULL;
+ } else {
+ state.speed = SPEED_UNKNOWN;
+ state.duplex = DUPLEX_UNKNOWN;
+ }
+
+ ret = phylink_set_fixed_link(adapter->phylink, &state);
+ if (ret) {
+ netdev_err(dev, "Could not set fixed link\n");
+ return ret;
+ }
+ } else {
+ netdev_err(dev, "no PHY found\n");
+ return -ENXIO;
+ }
+ }
+
+ if (ret) {
+ netdev_err(dev, "Could not attach PHY (%d)\n", ret);
+ return ret;
+ }
+
+ phylink_start(adapter->phylink);
+
+ return 0;
+}
+
+static void lan743x_phylink_disconnect(struct lan743x_adapter *adapter)
+{
+ phylink_stop(adapter->phylink);
+ phylink_disconnect_phy(adapter->phylink);
+}
+
static int lan743x_netdev_close(struct net_device *netdev)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
@@ -3074,7 +3213,7 @@ static int lan743x_netdev_close(struct net_device *netdev)
lan743x_ptp_close(adapter);
- lan743x_phy_close(adapter);
+ lan743x_phylink_disconnect(adapter);
lan743x_mac_close(adapter);
@@ -3097,13 +3236,13 @@ static int lan743x_netdev_open(struct net_device *netdev)
if (ret)
goto close_intr;
- ret = lan743x_phy_open(adapter);
+ ret = lan743x_phylink_connect(adapter);
if (ret)
goto close_mac;
ret = lan743x_ptp_open(adapter);
if (ret)
- goto close_phy;
+ goto close_mac;
lan743x_rfe_open(adapter);
@@ -3119,6 +3258,9 @@ static int lan743x_netdev_open(struct net_device *netdev)
goto close_tx;
}
+ if (netdev->phydev)
+ phy_support_eee(netdev->phydev);
+
#ifdef CONFIG_PM
if (adapter->netdev->phydev) {
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
@@ -3143,9 +3285,8 @@ close_rx:
lan743x_rx_close(&adapter->rx[index]);
}
lan743x_ptp_close(adapter);
-
-close_phy:
- lan743x_phy_close(adapter);
+ if (adapter->phylink)
+ lan743x_phylink_disconnect(adapter);
close_mac:
lan743x_mac_close(adapter);
@@ -3174,11 +3315,14 @@ static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
static int lan743x_netdev_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
if (!netif_running(netdev))
return -EINVAL;
if (cmd == SIOCSHWTSTAMP)
return lan743x_ptp_ioctl(netdev, ifr, cmd);
- return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+
+ return phylink_mii_ioctl(adapter->phylink, ifr, cmd);
}
static void lan743x_netdev_set_multicast(struct net_device *netdev)
@@ -3283,10 +3427,17 @@ static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
mdiobus_unregister(adapter->mdiobus);
}
+static void lan743x_destroy_phylink(struct lan743x_adapter *adapter)
+{
+ phylink_destroy(adapter->phylink);
+ adapter->phylink = NULL;
+}
+
static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
{
unregister_netdev(adapter->netdev);
+ lan743x_destroy_phylink(adapter);
lan743x_mdiobus_cleanup(adapter);
lan743x_hardware_cleanup(adapter);
lan743x_pci_cleanup(adapter);
@@ -3500,14 +3651,21 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
adapter->netdev->hw_features = adapter->netdev->features;
- /* carrier off reporting is important to ethtool even BEFORE open */
- netif_carrier_off(netdev);
+ ret = lan743x_phylink_create(adapter);
+ if (ret < 0) {
+ netif_err(adapter, probe, netdev,
+ "failed to setup phylink (%d)\n", ret);
+ goto cleanup_mdiobus;
+ }
ret = register_netdev(adapter->netdev);
if (ret < 0)
- goto cleanup_mdiobus;
+ goto cleanup_phylink;
return 0;
+cleanup_phylink:
+ lan743x_destroy_phylink(adapter);
+
cleanup_mdiobus:
lan743x_mdiobus_cleanup(adapter);
@@ -3763,6 +3921,7 @@ static int lan743x_pm_resume(struct device *dev)
MAC_WK_SRC_WK_FR_SAVED_;
lan743x_csr_write(adapter, MAC_WK_SRC, data);
+ rtnl_lock();
/* open netdev when netdev is at running state while resume.
* For instance, it is true when system wakesup after pm-suspend
* However, it is false when system wakes up after suspend GUI menu
@@ -3771,6 +3930,7 @@ static int lan743x_pm_resume(struct device *dev)
lan743x_netdev_open(netdev);
netif_device_attach(netdev);
+ rtnl_unlock();
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 3b2585a384e2..8ef897c114d3 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -5,6 +5,7 @@
#define _LAN743X_H
#include <linux/phy.h>
+#include <linux/phylink.h>
#include "lan743x_ptp.h"
#define DRIVER_AUTHOR "Bryan Whitehead <Bryan.Whitehead@microchip.com>"
@@ -1083,6 +1084,8 @@ struct lan743x_adapter {
u32 flags;
u32 hw_cfg;
phy_interface_t phy_interface;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
};
#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel))
@@ -1203,5 +1206,6 @@ void lan743x_hs_syslock_release(struct lan743x_adapter *adapter);
void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
bool tx_enable, bool rx_enable);
int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr);
+void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable);
#endif /* _LAN743X_H */
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index dcea6652d56d..4a777b449ecd 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -401,28 +401,21 @@ static int lan743x_ptpci_settime64(struct ptp_clock_info *ptpci,
u32 nano_seconds = 0;
u32 seconds = 0;
- if (ts) {
- if (ts->tv_sec > 0xFFFFFFFFLL ||
- ts->tv_sec < 0) {
- netif_warn(adapter, drv, adapter->netdev,
- "ts->tv_sec out of range, %lld\n",
- ts->tv_sec);
- return -ERANGE;
- }
- if (ts->tv_nsec >= 1000000000L ||
- ts->tv_nsec < 0) {
- netif_warn(adapter, drv, adapter->netdev,
- "ts->tv_nsec out of range, %ld\n",
- ts->tv_nsec);
- return -ERANGE;
- }
- seconds = ts->tv_sec;
- nano_seconds = ts->tv_nsec;
- lan743x_ptp_clock_set(adapter, seconds, nano_seconds, 0);
- } else {
- netif_warn(adapter, drv, adapter->netdev, "ts == NULL\n");
- return -EINVAL;
+ if (ts->tv_sec > 0xFFFFFFFFLL) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "ts->tv_sec out of range, %lld\n",
+ ts->tv_sec);
+ return -ERANGE;
+ }
+ if (ts->tv_nsec < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "ts->tv_nsec out of range, %ld\n",
+ ts->tv_nsec);
+ return -ERANGE;
}
+ seconds = ts->tv_sec;
+ nano_seconds = ts->tv_nsec;
+ lan743x_ptp_clock_set(adapter, seconds, nano_seconds, 0);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan865x/Kconfig b/drivers/net/ethernet/microchip/lan865x/Kconfig
new file mode 100644
index 000000000000..7f2a4e7e1915
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan865x/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Microchip LAN865x Driver Support
+#
+
+if NET_VENDOR_MICROCHIP
+
+config LAN865X
+ tristate "LAN865x support"
+ depends on SPI
+ select OA_TC6
+ help
+	  Support for the Microchip LAN8650/1 Rev.B0/B1 MAC-PHY Ethernet chip.
+	  It uses the OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface
+	  specification.
+
+ To compile this driver as a module, choose M here. The module will be
+ called lan865x.
+
+endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/lan865x/Makefile b/drivers/net/ethernet/microchip/lan865x/Makefile
new file mode 100644
index 000000000000..9f5dd89c1eb8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan865x/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip LAN865x Driver
+#
+
+obj-$(CONFIG_LAN865X) += lan865x.o
diff --git a/drivers/net/ethernet/microchip/lan865x/lan865x.c b/drivers/net/ethernet/microchip/lan865x/lan865x.c
new file mode 100644
index 000000000000..dd436bdff0f8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan865x/lan865x.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Microchip's LAN865x 10BASE-T1S MAC-PHY driver
+ *
+ * Author: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/oa_tc6.h>
+
+#define DRV_NAME "lan8650"
+
+/* MAC Network Control Register */
+#define LAN865X_REG_MAC_NET_CTL 0x00010000
+#define MAC_NET_CTL_TXEN BIT(3) /* Transmit Enable */
+#define MAC_NET_CTL_RXEN BIT(2) /* Receive Enable */
+
+/* MAC Network Configuration Reg */
+#define LAN865X_REG_MAC_NET_CFG 0x00010001
+#define MAC_NET_CFG_PROMISCUOUS_MODE BIT(4)
+#define MAC_NET_CFG_MULTICAST_MODE BIT(6)
+#define MAC_NET_CFG_UNICAST_MODE BIT(7)
+
+/* MAC Hash Register Bottom */
+#define LAN865X_REG_MAC_L_HASH 0x00010020
+/* MAC Hash Register Top */
+#define LAN865X_REG_MAC_H_HASH 0x00010021
+/* MAC Specific Addr 1 Bottom Reg */
+#define LAN865X_REG_MAC_L_SADDR1 0x00010022
+/* MAC Specific Addr 1 Top Reg */
+#define LAN865X_REG_MAC_H_SADDR1 0x00010023
+
+struct lan865x_priv {
+ struct work_struct multicast_work;
+ struct net_device *netdev;
+ struct spi_device *spi;
+ struct oa_tc6 *tc6;
+};
+
+static int lan865x_set_hw_macaddr_low_bytes(struct oa_tc6 *tc6, const u8 *mac)
+{
+ u32 regval;
+
+ regval = (mac[3] << 24) | (mac[2] << 16) | (mac[1] << 8) | mac[0];
+
+ return oa_tc6_write_register(tc6, LAN865X_REG_MAC_L_SADDR1, regval);
+}
+
+static int lan865x_set_hw_macaddr(struct lan865x_priv *priv, const u8 *mac)
+{
+ int restore_ret;
+ u32 regval;
+ int ret;
+
+ /* Configure MAC address low bytes */
+ ret = lan865x_set_hw_macaddr_low_bytes(priv->tc6, mac);
+ if (ret)
+ return ret;
+
+ /* Prepare and configure MAC address high bytes */
+ regval = (mac[5] << 8) | mac[4];
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_SADDR1,
+ regval);
+ if (!ret)
+ return 0;
+
+ /* Restore the old MAC address low bytes from netdev if the new MAC
+ * address high bytes setting failed.
+ */
+ restore_ret = lan865x_set_hw_macaddr_low_bytes(priv->tc6,
+ priv->netdev->dev_addr);
+ if (restore_ret)
+ return restore_ret;
+
+ return ret;
+}
+
+static const struct ethtool_ops lan865x_ethtool_ops = {
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static int lan865x_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+ struct sockaddr *address = addr;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(netdev, addr);
+ if (ret < 0)
+ return ret;
+
+ if (ether_addr_equal(address->sa_data, netdev->dev_addr))
+ return 0;
+
+ ret = lan865x_set_hw_macaddr(priv, address->sa_data);
+ if (ret)
+ return ret;
+
+ eth_commit_mac_addr_change(netdev, addr);
+
+ return 0;
+}
+
+static u32 get_address_bit(u8 addr[ETH_ALEN], u32 bit)
+{
+ return ((addr[bit / 8]) >> (bit % 8)) & 1;
+}
+
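+/* Compute the 6-bit multicast hash index: bit i of the index is the XOR
+ * of address bits i, i + 6, ..., i + 42 (bits taken LSB-first within
+ * each byte).
+ */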
+static u32 lan865x_hash(u8 addr[ETH_ALEN])
+{
+ u32 hash_index = 0;
+
+ for (int i = 0; i < 6; i++) {
+ u32 hash = 0;
+
+ for (int j = 0; j < 8; j++)
+ hash ^= get_address_bit(addr, (j * 6) + i);
+
+ hash_index |= (hash << i);
+ }
+
+ return hash_index;
+}
+
+static int lan865x_set_specific_multicast_addr(struct lan865x_priv *priv)
+{
+ struct netdev_hw_addr *ha;
+ u32 hash_lo = 0;
+ u32 hash_hi = 0;
+ int ret;
+
+ netdev_for_each_mc_addr(ha, priv->netdev) {
+ u32 bit_num = lan865x_hash(ha->addr);
+
+ if (bit_num >= BIT(5))
+ hash_hi |= (1 << (bit_num - BIT(5)));
+ else
+ hash_lo |= (1 << bit_num);
+ }
+
+ /* Enabling specific multicast addresses */
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_HASH, hash_hi);
+ if (ret) {
+ netdev_err(priv->netdev, "Failed to write reg_hashh: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_L_HASH, hash_lo);
+ if (ret)
+ netdev_err(priv->netdev, "Failed to write reg_hashl: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int lan865x_set_all_multicast_addr(struct lan865x_priv *priv)
+{
+ int ret;
+
+ /* Enabling all multicast addresses */
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_HASH,
+ 0xffffffff);
+ if (ret) {
+ netdev_err(priv->netdev, "Failed to write reg_hashh: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_L_HASH,
+ 0xffffffff);
+ if (ret)
+ netdev_err(priv->netdev, "Failed to write reg_hashl: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int lan865x_clear_all_multicast_addr(struct lan865x_priv *priv)
+{
+ int ret;
+
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_HASH, 0);
+ if (ret) {
+ netdev_err(priv->netdev, "Failed to write reg_hashh: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_L_HASH, 0);
+ if (ret)
+ netdev_err(priv->netdev, "Failed to write reg_hashl: %d\n",
+ ret);
+
+ return ret;
+}
+
+static void lan865x_multicast_work_handler(struct work_struct *work)
+{
+ struct lan865x_priv *priv = container_of(work, struct lan865x_priv,
+ multicast_work);
+ u32 regval = 0;
+ int ret;
+
+ if (priv->netdev->flags & IFF_PROMISC) {
+ /* Enabling promiscuous mode */
+ regval |= MAC_NET_CFG_PROMISCUOUS_MODE;
+ regval &= (~MAC_NET_CFG_MULTICAST_MODE);
+ regval &= (~MAC_NET_CFG_UNICAST_MODE);
+ } else if (priv->netdev->flags & IFF_ALLMULTI) {
+ /* Enabling all multicast mode */
+ if (lan865x_set_all_multicast_addr(priv))
+ return;
+
+ regval &= (~MAC_NET_CFG_PROMISCUOUS_MODE);
+ regval |= MAC_NET_CFG_MULTICAST_MODE;
+ regval &= (~MAC_NET_CFG_UNICAST_MODE);
+ } else if (!netdev_mc_empty(priv->netdev)) {
+ /* Enabling specific multicast mode */
+ if (lan865x_set_specific_multicast_addr(priv))
+ return;
+
+ regval &= (~MAC_NET_CFG_PROMISCUOUS_MODE);
+ regval |= MAC_NET_CFG_MULTICAST_MODE;
+ regval &= (~MAC_NET_CFG_UNICAST_MODE);
+ } else {
+ /* Enabling local MAC address only */
+ if (lan865x_clear_all_multicast_addr(priv))
+ return;
+ }
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_NET_CFG, regval);
+ if (ret)
+ netdev_err(priv->netdev, "Failed to enable promiscuous/multicast/normal mode: %d\n",
+ ret);
+}
+
+static void lan865x_set_multicast_list(struct net_device *netdev)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+
+ schedule_work(&priv->multicast_work);
+}
+
+static netdev_tx_t lan865x_send_packet(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+
+ return oa_tc6_start_xmit(priv->tc6, skb);
+}
+
+static int lan865x_hw_disable(struct lan865x_priv *priv)
+{
+ u32 regval;
+
+ if (oa_tc6_read_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, &regval))
+ return -ENODEV;
+
+ regval &= ~(MAC_NET_CTL_TXEN | MAC_NET_CTL_RXEN);
+
+ if (oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, regval))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int lan865x_net_close(struct net_device *netdev)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ netif_stop_queue(netdev);
+ phy_stop(netdev->phydev);
+ ret = lan865x_hw_disable(priv);
+ if (ret) {
+ netdev_err(netdev, "Failed to disable the hardware: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lan865x_hw_enable(struct lan865x_priv *priv)
+{
+ u32 regval;
+
+ if (oa_tc6_read_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, &regval))
+ return -ENODEV;
+
+ regval |= MAC_NET_CTL_TXEN | MAC_NET_CTL_RXEN;
+
+ if (oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, regval))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int lan865x_net_open(struct net_device *netdev)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ ret = lan865x_hw_enable(priv);
+ if (ret) {
+ netdev_err(netdev, "Failed to enable hardware: %d\n", ret);
+ return ret;
+ }
+
+ phy_start(netdev->phydev);
+
+ return 0;
+}
+
+static const struct net_device_ops lan865x_netdev_ops = {
+ .ndo_open = lan865x_net_open,
+ .ndo_stop = lan865x_net_close,
+ .ndo_start_xmit = lan865x_send_packet,
+ .ndo_set_rx_mode = lan865x_set_multicast_list,
+ .ndo_set_mac_address = lan865x_set_mac_address,
+};
+
+static int lan865x_probe(struct spi_device *spi)
+{
+ struct net_device *netdev;
+ struct lan865x_priv *priv;
+ int ret;
+
+ netdev = alloc_etherdev(sizeof(struct lan865x_priv));
+ if (!netdev)
+ return -ENOMEM;
+
+ priv = netdev_priv(netdev);
+ priv->netdev = netdev;
+ priv->spi = spi;
+ spi_set_drvdata(spi, priv);
+ INIT_WORK(&priv->multicast_work, lan865x_multicast_work_handler);
+
+ priv->tc6 = oa_tc6_init(spi, netdev);
+ if (!priv->tc6) {
+ ret = -ENODEV;
+ goto free_netdev;
+ }
+
+ /* As per point s3 in the errata below, SPI receive Ethernet frame
+ * transfer may halt when starting the next frame in the same data block
+ * (chunk) as the end of a previous frame. The RFA field should be
+ * configured to 01b or 10b for proper operation. In these modes, only
+ * one receive Ethernet frame will be placed in a single data block.
+ * When the RFA field is written to 01b, received frames will be forced
+ * to start only in the first word of the data block payload (SWO=0). As
+ * recommended, enable the zero-align receive frame (ZARFE) feature for
+ * proper operation.
+ *
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/AIS/ProductDocuments/Errata/LAN8650-1-Errata-80001075.pdf
+ */
+ ret = oa_tc6_zero_align_receive_frame_enable(priv->tc6);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to set ZARFE: %d\n", ret);
+ goto oa_tc6_exit;
+ }
+
+ /* Get the MAC address from the SPI device tree node */
+ if (device_get_ethdev_address(&spi->dev, netdev))
+ eth_hw_addr_random(netdev);
+
+ ret = lan865x_set_hw_macaddr(priv, netdev->dev_addr);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to configure MAC: %d\n", ret);
+ goto oa_tc6_exit;
+ }
+
+ netdev->if_port = IF_PORT_10BASET;
+ netdev->irq = spi->irq;
+ netdev->netdev_ops = &lan865x_netdev_ops;
+ netdev->ethtool_ops = &lan865x_ethtool_ops;
+
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(&spi->dev, "Register netdev failed (ret = %d)", ret);
+ goto oa_tc6_exit;
+ }
+
+ return 0;
+
+oa_tc6_exit:
+ oa_tc6_exit(priv->tc6);
+free_netdev:
+ free_netdev(priv->netdev);
+ return ret;
+}
+
+static void lan865x_remove(struct spi_device *spi)
+{
+ struct lan865x_priv *priv = spi_get_drvdata(spi);
+
+ cancel_work_sync(&priv->multicast_work);
+ unregister_netdev(priv->netdev);
+ oa_tc6_exit(priv->tc6);
+ free_netdev(priv->netdev);
+}
+
+static const struct spi_device_id spidev_spi_ids[] = {
+ { .name = "lan8650" },
+ {},
+};
+
+static const struct of_device_id lan865x_dt_ids[] = {
+ { .compatible = "microchip,lan8650" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, lan865x_dt_ids);
+
+static struct spi_driver lan865x_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = lan865x_dt_ids,
+ },
+ .probe = lan865x_probe,
+ .remove = lan865x_remove,
+ .id_table = spidev_spi_ids,
+};
+module_spi_driver(lan865x_driver);
+
+MODULE_DESCRIPTION(DRV_NAME " 10BASE-T1S MACPHY Ethernet Driver");
+MODULE_AUTHOR("Parthiban Veerasooran <parthiban.veerasooran@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
index f9ebffc04eb8..f663b6e12466 100644
--- a/drivers/net/ethernet/microchip/lan966x/Kconfig
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -8,6 +8,7 @@ config LAN966X_SWITCH
select PHYLINK
select PAGE_POOL
select VCAP
+ select FDMA
help
This driver supports the Lan966x network switch device.
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
index 3b6ac331691d..4cdbe263502c 100644
--- a/drivers/net/ethernet/microchip/lan966x/Makefile
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -20,3 +20,4 @@ lan966x-switch-$(CONFIG_DEBUG_FS) += lan966x_vcap_debugfs.o
# Provide include files
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
+ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
index aec7066d83b3..2474dfd330f4 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -549,16 +549,13 @@ static int lan966x_get_ts_info(struct net_device *dev,
phc = &lan966x->phc[LAN966X_PHC_PORT];
- info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1;
- if (info->phc_index == -1) {
- info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ if (phc->clock) {
+ info->phc_index = ptp_clock_index(phc->clock);
+ } else {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
index 3960534ac2ad..502670718104 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -6,31 +6,55 @@
#include "lan966x_main.h"
-static int lan966x_fdma_channel_active(struct lan966x *lan966x)
-{
- return lan_rd(lan966x, FDMA_CH_ACTIVE);
-}
-
-static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
- struct lan966x_db *db)
+static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
{
+ struct lan966x *lan966x = (struct lan966x *)fdma->priv;
+ struct lan966x_rx *rx = &lan966x->rx;
struct page *page;
page = page_pool_dev_alloc_pages(rx->page_pool);
if (unlikely(!page))
- return NULL;
+ return -ENOMEM;
+
+ rx->page[dcb][db] = page;
+ *dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
+
+ return 0;
+}
- db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
+static int lan966x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ struct lan966x *lan966x = (struct lan966x *)fdma->priv;
+
+ *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr;
- return page;
+ return 0;
+}
+
+static int lan966x_fdma_xdp_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ struct lan966x *lan966x = (struct lan966x *)fdma->priv;
+
+ *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr + XDP_PACKET_HEADROOM;
+
+ return 0;
+}
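
These dataptr callbacks are the driver's half of the new shared FDMA
library's contract: whenever the library initializes or re-adds a DCB, it
asks the driver for the DMA address of each data block. fdma_api.h is not
part of this excerpt, so the toy model below is an assumption pieced together
from the callbacks and the removed open-coded loops (DB/DCB layout per the
deleted lan966x_db/lan966x_rx_dcb structs, terminator 0x1 per
FDMA_DCB_INVALID_DATA):

#include <stdint.h>
#include <stdio.h>

struct fdma_db { uint64_t dataptr; uint64_t status; };
struct fdma_dcb { uint64_t nextptr; uint64_t info; struct fdma_db db[1]; };

struct fdma {
	struct fdma_dcb *dcbs;
	uint64_t dma;			/* DMA address of dcbs[] */
	int n_dcbs, n_dbs;
	int (*dataptr_cb)(struct fdma *fdma, int dcb, int db, uint64_t *dataptr);
};

/* Toy callback in the spirit of lan966x_fdma_rx_dataptr_cb() */
static int toy_dataptr_cb(struct fdma *fdma, int dcb, int db, uint64_t *dataptr)
{
	*dataptr = 0x20000000ull + (uint64_t)(dcb * fdma->n_dbs + db) * 4096;
	return 0;
}

int main(void)
{
	static struct fdma_dcb ring[4];
	struct fdma fdma = {
		.dcbs = ring, .dma = 0x10000000, .n_dcbs = 4, .n_dbs = 1,
		.dataptr_cb = toy_dataptr_cb,
	};

	for (int i = 0; i < fdma.n_dcbs; i++) {
		for (int j = 0; j < fdma.n_dbs; j++)
			fdma.dataptr_cb(&fdma, i, j, &ring[i].db[j].dataptr);
		/* Chain the ring the way the removed open-coded loops did */
		ring[i].nextptr = fdma.dma + sizeof(ring[0]) * (i + 1);
	}
	ring[fdma.n_dcbs - 1].nextptr = 0x1;	/* FDMA_DCB_INVALID_DATA */

	printf("dcb0: nextptr=0x%llx dataptr=0x%llx\n",
	       (unsigned long long)ring[0].nextptr,
	       (unsigned long long)ring[0].db[0].dataptr);
	return 0;
}
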
+
+static int lan966x_fdma_channel_active(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, FDMA_CH_ACTIVE);
}
static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
+ struct fdma *fdma = &rx->fdma;
int i, j;
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
- for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
+ for (i = 0; i < fdma->n_dcbs; ++i) {
+ for (j = 0; j < fdma->n_dbs; ++j)
page_pool_put_full_page(rx->page_pool,
rx->page[i][j], false);
}
@@ -38,41 +62,23 @@ static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
+ struct fdma *fdma = &rx->fdma;
struct page *page;
- page = rx->page[rx->dcb_index][rx->db_index];
+ page = rx->page[fdma->dcb_index][fdma->db_index];
if (unlikely(!page))
return;
page_pool_recycle_direct(rx->page_pool, page);
}
-static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
- struct lan966x_rx_dcb *dcb,
- u64 nextptr)
-{
- struct lan966x_db *db;
- int i;
-
- for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
- db = &dcb->db[i];
- db->status = FDMA_DCB_STATUS_INTR;
- }
-
- dcb->nextptr = FDMA_DCB_INVALID_DATA;
- dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);
-
- rx->last_entry->nextptr = nextptr;
- rx->last_entry = dcb;
-}
-
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
struct page_pool_params pp_params = {
.order = rx->page_order,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
- .pool_size = FDMA_DCB_MAX,
+ .pool_size = rx->fdma.n_dcbs,
.nid = NUMA_NO_NODE,
.dev = lan966x->dev,
.dma_dir = DMA_FROM_DEVICE,
@@ -104,84 +110,41 @@ static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
- struct lan966x_rx_dcb *dcb;
- struct lan966x_db *db;
- struct page *page;
- int i, j;
- int size;
+ struct fdma *fdma = &rx->fdma;
+ int err;
if (lan966x_fdma_rx_alloc_page_pool(rx))
return PTR_ERR(rx->page_pool);
- /* calculate how many pages are needed to allocate the dcbs */
- size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
-
- rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
- if (!rx->dcbs)
- return -ENOMEM;
-
- rx->last_entry = rx->dcbs;
- rx->db_index = 0;
- rx->dcb_index = 0;
-
- /* Now for each dcb allocate the dbs */
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
- dcb = &rx->dcbs[i];
- dcb->info = 0;
-
- /* For each db allocate a page and map it to the DB dataptr. */
- for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
- db = &dcb->db[j];
- page = lan966x_fdma_rx_alloc_page(rx, db);
- if (!page)
- return -ENOMEM;
-
- db->status = 0;
- rx->page[i][j] = page;
- }
+ err = fdma_alloc_coherent(lan966x->dev, fdma);
+ if (err)
+ return err;
- lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
- }
+ fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
return 0;
}
-static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
-{
- rx->dcb_index++;
- rx->dcb_index &= FDMA_DCB_MAX - 1;
-}
-
-static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
-{
- struct lan966x *lan966x = rx->lan966x;
- u32 size;
-
- /* Now it is possible to do the cleanup of dcb */
- size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
-}
-
static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
+ struct fdma *fdma = &rx->fdma;
u32 mask;
/* When activating a channel, first is required to write the first DCB
* address and then to activate it
*/
- lan_wr(lower_32_bits((u64)rx->dma), lan966x,
- FDMA_DCB_LLP(rx->channel_id));
- lan_wr(upper_32_bits((u64)rx->dma), lan966x,
- FDMA_DCB_LLP1(rx->channel_id));
+ lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
+ FDMA_DCB_LLP(fdma->channel_id));
+ lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
+ FDMA_DCB_LLP1(fdma->channel_id));
- lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
FDMA_CH_CFG_CH_MEM_SET(1),
- lan966x, FDMA_CH_CFG(rx->channel_id));
+ lan966x, FDMA_CH_CFG(fdma->channel_id));
/* Start fdma */
lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
@@ -191,13 +154,13 @@ static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
/* Enable interrupts */
mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
- mask |= BIT(rx->channel_id);
+ mask |= BIT(fdma->channel_id);
lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
FDMA_INTR_DB_ENA_INTR_DB_ENA,
lan966x, FDMA_INTR_DB_ENA);
/* Activate the channel */
- lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
FDMA_CH_ACTIVATE_CH_ACTIVATE,
lan966x, FDMA_CH_ACTIVATE);
}
@@ -205,18 +168,19 @@ static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
+ struct fdma *fdma = &rx->fdma;
u32 val;
/* Disable the channel */
- lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
FDMA_CH_DISABLE_CH_DISABLE,
lan966x, FDMA_CH_DISABLE);
readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
- val, !(val & BIT(rx->channel_id)),
+ val, !(val & BIT(fdma->channel_id)),
READL_SLEEP_US, READL_TIMEOUT_US);
- lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
FDMA_CH_DB_DISCARD_DB_DISCARD,
lan966x, FDMA_CH_DB_DISCARD);
}
@@ -225,50 +189,27 @@ static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
- lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)),
FDMA_CH_RELOAD_CH_RELOAD,
lan966x, FDMA_CH_RELOAD);
}
-static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
- struct lan966x_tx_dcb *dcb)
-{
- dcb->nextptr = FDMA_DCB_INVALID_DATA;
- dcb->info = 0;
-}
-
static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
- struct lan966x_tx_dcb *dcb;
- struct lan966x_db *db;
- int size;
- int i, j;
+ struct fdma *fdma = &tx->fdma;
+ int err;
- tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
+ tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf),
GFP_KERNEL);
if (!tx->dcbs_buf)
return -ENOMEM;
- /* calculate how many pages are needed to allocate the dcbs */
- size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
- if (!tx->dcbs)
+ err = fdma_alloc_coherent(lan966x->dev, fdma);
+ if (err)
goto out;
- /* Now for each dcb allocate the db */
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
- dcb = &tx->dcbs[i];
-
- for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
- db = &dcb->db[j];
- db->dataptr = 0;
- db->status = 0;
- }
-
- lan966x_fdma_tx_add_dcb(tx, dcb);
- }
+ fdma_dcbs_init(fdma, 0, 0);
return 0;
@@ -280,33 +221,30 @@ out:
static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
- int size;
kfree(tx->dcbs_buf);
-
- size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
+ fdma_free_coherent(lan966x->dev, &tx->fdma);
}
static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
+ struct fdma *fdma = &tx->fdma;
u32 mask;
/* When activating a channel, first is required to write the first DCB
* address and then to activate it
*/
- lan_wr(lower_32_bits((u64)tx->dma), lan966x,
- FDMA_DCB_LLP(tx->channel_id));
- lan_wr(upper_32_bits((u64)tx->dma), lan966x,
- FDMA_DCB_LLP1(tx->channel_id));
+ lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
+ FDMA_DCB_LLP(fdma->channel_id));
+ lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
+ FDMA_DCB_LLP1(fdma->channel_id));
- lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
FDMA_CH_CFG_CH_MEM_SET(1),
- lan966x, FDMA_CH_CFG(tx->channel_id));
+ lan966x, FDMA_CH_CFG(fdma->channel_id));
/* Start fdma */
lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
@@ -316,13 +254,13 @@ static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
/* Enable interrupts */
mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
- mask |= BIT(tx->channel_id);
+ mask |= BIT(fdma->channel_id);
lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
FDMA_INTR_DB_ENA_INTR_DB_ENA,
lan966x, FDMA_INTR_DB_ENA);
/* Activate the channel */
- lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
FDMA_CH_ACTIVATE_CH_ACTIVATE,
lan966x, FDMA_CH_ACTIVATE);
}
@@ -330,23 +268,23 @@ static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
+ struct fdma *fdma = &tx->fdma;
u32 val;
/* Disable the channel */
- lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
FDMA_CH_DISABLE_CH_DISABLE,
lan966x, FDMA_CH_DISABLE);
readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
- val, !(val & BIT(tx->channel_id)),
+ val, !(val & BIT(fdma->channel_id)),
READL_SLEEP_US, READL_TIMEOUT_US);
- lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
FDMA_CH_DB_DISCARD_DB_DISCARD,
lan966x, FDMA_CH_DB_DISCARD);
tx->activated = false;
- tx->last_in_use = -1;
}
static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
@@ -354,7 +292,7 @@ static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
struct lan966x *lan966x = tx->lan966x;
/* Write the registers to reload the channel */
- lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)),
FDMA_CH_RELOAD_CH_RELOAD,
lan966x, FDMA_CH_RELOAD);
}
@@ -393,23 +331,24 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
struct lan966x_tx *tx = &lan966x->tx;
struct lan966x_rx *rx = &lan966x->rx;
struct lan966x_tx_dcb_buf *dcb_buf;
+ struct fdma *fdma = &tx->fdma;
struct xdp_frame_bulk bq;
- struct lan966x_db *db;
unsigned long flags;
bool clear = false;
+ struct fdma_db *db;
int i;
xdp_frame_bulk_init(&bq);
spin_lock_irqsave(&lan966x->tx_lock, flags);
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ for (i = 0; i < fdma->n_dcbs; ++i) {
dcb_buf = &tx->dcbs_buf[i];
if (!dcb_buf->used)
continue;
- db = &tx->dcbs[i].db[0];
- if (!(db->status & FDMA_DCB_STATUS_DONE))
+ db = fdma_db_get(fdma, i, 0);
+ if (!fdma_db_is_done(db))
continue;
dcb_buf->dev->stats.tx_packets++;
@@ -449,27 +388,16 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}
-static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
-{
- struct lan966x_db *db;
-
- /* Check if there is any data */
- db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
- if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
- return false;
-
- return true;
-}
-
static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
struct lan966x *lan966x = rx->lan966x;
+ struct fdma *fdma = &rx->fdma;
struct lan966x_port *port;
- struct lan966x_db *db;
+ struct fdma_db *db;
struct page *page;
- db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
- page = rx->page[rx->dcb_index][rx->db_index];
+ db = fdma_db_next_get(fdma);
+ page = rx->page[fdma->dcb_index][fdma->db_index];
if (unlikely(!page))
return FDMA_ERROR;
@@ -494,16 +422,17 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
u64 src_port)
{
struct lan966x *lan966x = rx->lan966x;
- struct lan966x_db *db;
+ struct fdma *fdma = &rx->fdma;
struct sk_buff *skb;
+ struct fdma_db *db;
struct page *page;
u64 timestamp;
/* Get the received frame and unmap it */
- db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
- page = rx->page[rx->dcb_index][rx->db_index];
+ db = fdma_db_next_get(fdma);
+ page = rx->page[fdma->dcb_index][fdma->db_index];
- skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
+ skb = build_skb(page_address(page), fdma->db_size);
if (unlikely(!skb))
goto free_page;
@@ -546,21 +475,19 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
struct lan966x_rx *rx = &lan966x->rx;
- int dcb_reload = rx->dcb_index;
- struct lan966x_rx_dcb *old_dcb;
- struct lan966x_db *db;
+ int old_dcb, dcb_reload, counter = 0;
+ struct fdma *fdma = &rx->fdma;
bool redirect = false;
struct sk_buff *skb;
- struct page *page;
- int counter = 0;
u64 src_port;
- u64 nextptr;
+
+ dcb_reload = fdma->dcb_index;
lan966x_fdma_tx_clear_buf(lan966x, weight);
/* Get all received skb */
while (counter < weight) {
- if (!lan966x_fdma_rx_more_frames(rx))
+ if (!fdma_has_frames(fdma))
break;
counter++;
@@ -570,22 +497,22 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
break;
case FDMA_ERROR:
lan966x_fdma_rx_free_page(rx);
- lan966x_fdma_rx_advance_dcb(rx);
+ fdma_dcb_advance(fdma);
goto allocate_new;
case FDMA_REDIRECT:
redirect = true;
fallthrough;
case FDMA_TX:
- lan966x_fdma_rx_advance_dcb(rx);
+ fdma_dcb_advance(fdma);
continue;
case FDMA_DROP:
lan966x_fdma_rx_free_page(rx);
- lan966x_fdma_rx_advance_dcb(rx);
+ fdma_dcb_advance(fdma);
continue;
}
skb = lan966x_fdma_rx_get_frame(rx, src_port);
- lan966x_fdma_rx_advance_dcb(rx);
+ fdma_dcb_advance(fdma);
if (!skb)
goto allocate_new;
@@ -594,20 +521,14 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
allocate_new:
/* Allocate new pages and map them */
- while (dcb_reload != rx->dcb_index) {
- db = &rx->dcbs[dcb_reload].db[rx->db_index];
- page = lan966x_fdma_rx_alloc_page(rx, db);
- if (unlikely(!page))
- break;
- rx->page[dcb_reload][rx->db_index] = page;
-
- old_dcb = &rx->dcbs[dcb_reload];
+ while (dcb_reload != fdma->dcb_index) {
+ old_dcb = dcb_reload;
dcb_reload++;
- dcb_reload &= FDMA_DCB_MAX - 1;
+ dcb_reload &= fdma->n_dcbs - 1;
+
+ fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
- nextptr = rx->dma + ((unsigned long)old_dcb -
- (unsigned long)rx->dcbs);
- lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
lan966x_fdma_rx_reload(rx);
}
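
A side note on the reload loop above: masking with (n_dcbs - 1) is a cheap
modulo that only works because FDMA_DCB_MAX is a power of two, as this
standalone snippet shows:

#include <stdio.h>

int main(void)
{
	const int n_dcbs = 64;	/* must be a power of two */

	for (int i = 62; i < 67; i++)
		printf("%d -> %d\n", i, i & (n_dcbs - 1));
	return 0;
}
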
@@ -650,56 +571,30 @@ irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
struct lan966x_tx_dcb_buf *dcb_buf;
+ struct fdma *fdma = &tx->fdma;
int i;
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ for (i = 0; i < fdma->n_dcbs; ++i) {
dcb_buf = &tx->dcbs_buf[i];
- if (!dcb_buf->used && i != tx->last_in_use)
+ if (!dcb_buf->used &&
+ !fdma_is_last(&tx->fdma, &tx->fdma.dcbs[i]))
return i;
}
return -1;
}
-static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
- int next_to_use, int len,
- dma_addr_t dma_addr)
-{
- struct lan966x_tx_dcb *next_dcb;
- struct lan966x_db *next_db;
-
- next_dcb = &tx->dcbs[next_to_use];
- next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
-
- next_db = &next_dcb->db[0];
- next_db->dataptr = dma_addr;
- next_db->status = FDMA_DCB_STATUS_SOF |
- FDMA_DCB_STATUS_EOF |
- FDMA_DCB_STATUS_INTR |
- FDMA_DCB_STATUS_BLOCKO(0) |
- FDMA_DCB_STATUS_BLOCKL(len);
-}
-
-static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
+static void lan966x_fdma_tx_start(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
- struct lan966x_tx_dcb *dcb;
if (likely(lan966x->tx.activated)) {
- /* Connect current dcb to the next db */
- dcb = &tx->dcbs[tx->last_in_use];
- dcb->nextptr = tx->dma + (next_to_use *
- sizeof(struct lan966x_tx_dcb));
-
lan966x_fdma_tx_reload(tx);
} else {
/* Because it is first time, then just activate */
lan966x->tx.activated = true;
lan966x_fdma_tx_activate(tx);
}
-
- /* Move to next dcb because this last in use */
- tx->last_in_use = next_to_use;
}
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
@@ -752,11 +647,6 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
next_dcb_buf->data.xdpf = xdpf;
next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
-
- /* Setup next dcb */
- lan966x_fdma_tx_setup_dcb(tx, next_to_use,
- xdpf->len + IFH_LEN_BYTES,
- dma_addr);
} else {
page = ptr;
@@ -773,11 +663,6 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
next_dcb_buf->data.page = page;
next_dcb_buf->len = len + IFH_LEN_BYTES;
-
- /* Setup next dcb */
- lan966x_fdma_tx_setup_dcb(tx, next_to_use,
- len + IFH_LEN_BYTES,
- dma_addr + XDP_PACKET_HEADROOM);
}
/* Fill up the buffer */
@@ -788,8 +673,19 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
next_dcb_buf->ptp = false;
next_dcb_buf->dev = port->dev;
+ __fdma_dcb_add(&tx->fdma,
+ next_to_use,
+ 0,
+ FDMA_DCB_STATUS_INTR |
+ FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(next_dcb_buf->len),
+ &fdma_nextptr_cb,
+ &lan966x_fdma_xdp_tx_dataptr_cb);
+
/* Start the transmission */
- lan966x_fdma_tx_start(tx, next_to_use);
+ lan966x_fdma_tx_start(tx);
out:
spin_unlock(&lan966x->tx_lock);
@@ -847,9 +743,6 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
goto release;
}
- /* Setup next dcb */
- lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);
-
/* Fill up the buffer */
next_dcb_buf = &tx->dcbs_buf[next_to_use];
next_dcb_buf->use_skb = true;
@@ -861,12 +754,21 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
next_dcb_buf->ptp = false;
next_dcb_buf->dev = dev;
+ fdma_dcb_add(&tx->fdma,
+ next_to_use,
+ 0,
+ FDMA_DCB_STATUS_INTR |
+ FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len));
+
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
next_dcb_buf->ptp = true;
/* Start the transmission */
- lan966x_fdma_tx_start(tx, next_to_use);
+ lan966x_fdma_tx_start(tx);
return NETDEV_TX_OK;
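
For reference, the status word handed to fdma_dcb_add() above describes a
single-block frame: SOF and EOF both set, a completion interrupt requested,
block offset zero and the frame length in the low 16 bits. A standalone
computation using the mask values of the macros this series moves into the
shared FDMA library:

#include <stdint.h>
#include <stdio.h>

#define FDMA_DCB_STATUS_BLOCKL(x)	((x) & 0xffffu)		/* GENMASK(15, 0) */
#define FDMA_DCB_STATUS_SOF		(1u << 16)
#define FDMA_DCB_STATUS_EOF		(1u << 17)
#define FDMA_DCB_STATUS_INTR		(1u << 18)
#define FDMA_DCB_STATUS_BLOCKO(x)	(((x) << 20) & 0xfff00000u) /* GENMASK(31, 20) */

int main(void)
{
	uint32_t len = 1518;	/* illustrative frame length */
	uint32_t status = FDMA_DCB_STATUS_INTR | FDMA_DCB_STATUS_SOF |
			  FDMA_DCB_STATUS_EOF | FDMA_DCB_STATUS_BLOCKO(0) |
			  FDMA_DCB_STATUS_BLOCKL(len);

	printf("TX DCB status = 0x%08x\n", status);	/* 0x000705ee */
	return 0;
}
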
@@ -908,14 +810,11 @@ static int lan966x_qsys_sw_status(struct lan966x *lan966x)
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
struct page_pool *page_pool;
- dma_addr_t rx_dma;
- void *rx_dcbs;
- u32 size;
+ struct fdma fdma_rx_old;
int err;
/* Store these for later to free them */
- rx_dma = lan966x->rx.dma;
- rx_dcbs = lan966x->rx.dcbs;
+ memcpy(&fdma_rx_old, &lan966x->rx.fdma, sizeof(struct fdma));
page_pool = lan966x->rx.page_pool;
napi_synchronize(&lan966x->napi);
@@ -931,9 +830,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
goto restore;
lan966x_fdma_rx_start(&lan966x->rx);
- size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
+ fdma_free_coherent(lan966x->dev, &fdma_rx_old);
page_pool_destroy(page_pool);
@@ -943,8 +840,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
return err;
restore:
lan966x->rx.page_pool = page_pool;
- lan966x->rx.dma = rx_dma;
- lan966x->rx.dcbs = rx_dcbs;
+ memcpy(&lan966x->rx.fdma, &fdma_rx_old, sizeof(struct fdma));
lan966x_fdma_rx_start(&lan966x->rx);
return err;
@@ -1034,11 +930,24 @@ int lan966x_fdma_init(struct lan966x *lan966x)
return 0;
lan966x->rx.lan966x = lan966x;
- lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
+ lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL;
+ lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX;
+ lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS;
+ lan966x->rx.fdma.priv = lan966x;
+ lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma);
+ lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
+ lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
+ lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;
lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
lan966x->tx.lan966x = lan966x;
- lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
- lan966x->tx.last_in_use = -1;
+ lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL;
+ lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX;
+ lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS;
+ lan966x->tx.fdma.priv = lan966x;
+ lan966x->tx.fdma.size = fdma_get_size(&lan966x->tx.fdma);
+ lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
+ lan966x->tx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
+ lan966x->tx.fdma.ops.dataptr_cb = &lan966x_fdma_tx_dataptr_cb;
err = lan966x_fdma_rx_alloc(&lan966x->rx);
if (err)
@@ -1046,7 +955,7 @@ int lan966x_fdma_init(struct lan966x *lan966x)
err = lan966x_fdma_tx_alloc(&lan966x->tx);
if (err) {
- lan966x_fdma_rx_free(&lan966x->rx);
+ fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
return err;
}
@@ -1067,7 +976,7 @@ void lan966x_fdma_deinit(struct lan966x *lan966x)
napi_disable(&lan966x->napi);
lan966x_fdma_rx_free_pages(&lan966x->rx);
- lan966x_fdma_rx_free(&lan966x->rx);
+ fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
page_pool_destroy(lan966x->rx.page_pool);
lan966x_fdma_tx_free(&lan966x->tx);
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index ec672af12e25..534d4716d5f7 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -816,7 +816,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
NETIF_F_HW_VLAN_STAG_TX |
NETIF_F_HW_TC;
dev->hw_features |= NETIF_F_HW_TC;
- dev->priv_flags |= IFF_SEE_ALL_HWTSTAMP_REQUESTS;
+ dev->see_all_hwtstamp_requests = true;
dev->needed_headroom = IFH_LEN_BYTES;
eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index f8bebbcf77b2..25cb2f61986f 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -16,6 +16,7 @@
#include <net/switchdev.h>
#include <net/xdp.h>
+#include <fdma_api.h>
#include <vcap_api.h>
#include <vcap_api_client.h>
@@ -76,15 +77,6 @@
#define FDMA_RX_DCB_MAX_DBS 1
#define FDMA_TX_DCB_MAX_DBS 1
-#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
-
-#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
-#define FDMA_DCB_STATUS_SOF BIT(16)
-#define FDMA_DCB_STATUS_EOF BIT(17)
-#define FDMA_DCB_STATUS_INTR BIT(18)
-#define FDMA_DCB_STATUS_DONE BIT(19)
-#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
-#define FDMA_DCB_INVALID_DATA 0x1
#define FDMA_XTR_CHANNEL 6
#define FDMA_INJ_CHANNEL 0
@@ -199,49 +191,14 @@ enum vcap_is1_port_sel_rt {
struct lan966x_port;
-struct lan966x_db {
- u64 dataptr;
- u64 status;
-};
-
-struct lan966x_rx_dcb {
- u64 nextptr;
- u64 info;
- struct lan966x_db db[FDMA_RX_DCB_MAX_DBS];
-};
-
-struct lan966x_tx_dcb {
- u64 nextptr;
- u64 info;
- struct lan966x_db db[FDMA_TX_DCB_MAX_DBS];
-};
-
struct lan966x_rx {
struct lan966x *lan966x;
- /* Pointer to the array of hardware dcbs. */
- struct lan966x_rx_dcb *dcbs;
-
- /* Pointer to the last address in the dcbs. */
- struct lan966x_rx_dcb *last_entry;
+ struct fdma fdma;
/* For each DB, there is a page */
struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
- /* Represents the db_index, it can have a value between 0 and
- * FDMA_RX_DCB_MAX_DBS, once it reaches the value of FDMA_RX_DCB_MAX_DBS
- * it means that the DCB can be reused.
- */
- int db_index;
-
- /* Represents the index in the dcbs. It has a value between 0 and
- * FDMA_DCB_MAX
- */
- int dcb_index;
-
- /* Represents the dma address to the dcbs array */
- dma_addr_t dma;
-
/* Represents the page order that is used to allocate the pages for the
* RX buffers. This value is calculated based on max MTU of the devices.
*/
@@ -252,8 +209,6 @@ struct lan966x_rx {
*/
u32 max_mtu;
- u8 channel_id;
-
struct page_pool *page_pool;
};
@@ -275,18 +230,11 @@ struct lan966x_tx_dcb_buf {
struct lan966x_tx {
struct lan966x *lan966x;
- /* Pointer to the dcb list */
- struct lan966x_tx_dcb *dcbs;
- u16 last_in_use;
-
- /* Represents the DMA address to the first entry of the dcb entries. */
- dma_addr_t dma;
+ struct fdma fdma;
/* Array of dcbs that are given to the HW */
struct lan966x_tx_dcb_buf *dcbs_buf;
- u8 channel_id;
-
bool activated;
};
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
index f58c506bda22..3f04992eace6 100644
--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -10,6 +10,7 @@ config SPARX5_SWITCH
select PHY_SPARX5_SERDES
select RESET_CONTROLLER
select VCAP
+ select FDMA
help
This driver supports the Sparx5 network switch device.
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index b68fe9c9a656..288de95add18 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -18,3 +18,4 @@ sparx5-switch-$(CONFIG_DEBUG_FS) += sparx5_vcap_debugfs.o
# Provide include files
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
+ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index 4f800c1a435d..d898a7238b48 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1194,16 +1194,13 @@ static int sparx5_get_ts_info(struct net_device *dev,
phc = &sparx5->phc[SPARX5_PHC_PORT];
- info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1;
- if (info->phc_index == -1) {
- info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ if (phc->clock) {
+ info->phc_index = ptp_clock_index(phc->clock);
+ } else {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
index 1915998f6079..61df874b7623 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -21,107 +21,51 @@
#define FDMA_XTR_CHANNEL 6
#define FDMA_INJ_CHANNEL 0
-#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
-#define FDMA_DCB_INFO_TOKEN BIT(17)
-#define FDMA_DCB_INFO_INTR BIT(18)
-#define FDMA_DCB_INFO_SW(x) (((x) << 24) & GENMASK(31, 24))
-
-#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
-#define FDMA_DCB_STATUS_SOF BIT(16)
-#define FDMA_DCB_STATUS_EOF BIT(17)
-#define FDMA_DCB_STATUS_INTR BIT(18)
-#define FDMA_DCB_STATUS_DONE BIT(19)
-#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
-#define FDMA_DCB_INVALID_DATA 0x1
-
#define FDMA_XTR_BUFFER_SIZE 2048
#define FDMA_WEIGHT 4
-/* Frame DMA DCB format
- *
- * +---------------------------+
- * | Next Ptr |
- * +---------------------------+
- * | Reserved | Info |
- * +---------------------------+
- * | Data0 Ptr |
- * +---------------------------+
- * | Reserved | Status0 |
- * +---------------------------+
- * | Data1 Ptr |
- * +---------------------------+
- * | Reserved | Status1 |
- * +---------------------------+
- * | Data2 Ptr |
- * +---------------------------+
- * | Reserved | Status2 |
- * |-------------|-------------|
- * | |
- * | |
- * | |
- * | |
- * | |
- * |---------------------------|
- * | Data14 Ptr |
- * +-------------|-------------+
- * | Reserved | Status14 |
- * +-------------|-------------+
- */
-
-/* For each hardware DB there is an entry in this list and when the HW DB
- * entry is used, this SW DB entry is moved to the back of the list
- */
-struct sparx5_db {
- struct list_head list;
- void *cpu_addr;
-};
-
-static void sparx5_fdma_rx_add_dcb(struct sparx5_rx *rx,
- struct sparx5_rx_dcb_hw *dcb,
- u64 nextptr)
+static int sparx5_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
{
- int idx = 0;
-
- /* Reset the status of the DB */
- for (idx = 0; idx < FDMA_RX_DCB_MAX_DBS; ++idx) {
- struct sparx5_db_hw *db = &dcb->db[idx];
+ *dataptr = fdma->dma + (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
+ ((dcb * fdma->n_dbs + db) * fdma->db_size);
- db->status = FDMA_DCB_STATUS_INTR;
- }
- dcb->nextptr = FDMA_DCB_INVALID_DATA;
- dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE);
- rx->last_entry->nextptr = nextptr;
- rx->last_entry = dcb;
+ return 0;
}
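
The TX callback above encodes the contiguous layout implied by
fdma_get_size_contiguous(): a single DMA region holds the n_dcbs DCBs
followed by all data blocks, so block (dcb, db) starts at the end of the DCB
array plus (dcb * n_dbs + db) * db_size. A standalone check with illustrative
numbers (sizeof(struct fdma_dcb) is an assumption here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t dma = 0x10000000;	/* region base (example) */
	const int n_dcbs = 64, n_dbs = 1;
	const uint64_t dcb_size = 128;		/* assumed sizeof(struct fdma_dcb) */
	const uint64_t db_size = 4096;		/* PAGE_SIZE-aligned XTR buffer */
	const int dcb = 3, db = 0;

	uint64_t dataptr = dma + dcb_size * n_dcbs +
			   (uint64_t)(dcb * n_dbs + db) * db_size;
	printf("dataptr(dcb=%d, db=%d) = 0x%llx\n", dcb, db,
	       (unsigned long long)dataptr);
	return 0;
}
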
-static void sparx5_fdma_tx_add_dcb(struct sparx5_tx *tx,
- struct sparx5_tx_dcb_hw *dcb,
- u64 nextptr)
+static int sparx5_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
{
- int idx = 0;
+ struct sparx5 *sparx5 = fdma->priv;
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sk_buff *skb;
- /* Reset the status of the DB */
- for (idx = 0; idx < FDMA_TX_DCB_MAX_DBS; ++idx) {
- struct sparx5_db_hw *db = &dcb->db[idx];
+ skb = __netdev_alloc_skb(rx->ndev, fdma->db_size, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return -ENOMEM;
- db->status = FDMA_DCB_STATUS_DONE;
- }
- dcb->nextptr = FDMA_DCB_INVALID_DATA;
- dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE);
+ *dataptr = virt_to_phys(skb->data);
+
+ rx->skb[dcb][db] = skb;
+
+ return 0;
}
static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
+ struct fdma *fdma = &rx->fdma;
+
/* Write the buffer address in the LLP and LLP1 regs */
- spx5_wr(((u64)rx->dma) & GENMASK(31, 0), sparx5,
- FDMA_DCB_LLP(rx->channel_id));
- spx5_wr(((u64)rx->dma) >> 32, sparx5, FDMA_DCB_LLP1(rx->channel_id));
+ spx5_wr(((u64)fdma->dma) & GENMASK(31, 0), sparx5,
+ FDMA_DCB_LLP(fdma->channel_id));
+ spx5_wr(((u64)fdma->dma) >> 32, sparx5,
+ FDMA_DCB_LLP1(fdma->channel_id));
/* Set the number of RX DBs to be used, and DB end-of-frame interrupt */
- spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
+ spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
FDMA_CH_CFG_CH_INJ_PORT_SET(XTR_QUEUE),
- sparx5, FDMA_CH_CFG(rx->channel_id));
+ sparx5, FDMA_CH_CFG(fdma->channel_id));
/* Set the RX Watermark to max */
spx5_rmw(FDMA_XTR_CFG_XTR_FIFO_WM_SET(31), FDMA_XTR_CFG_XTR_FIFO_WM,
@@ -133,22 +77,24 @@ static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx)
sparx5, FDMA_PORT_CTRL(0));
/* Enable RX channel DB interrupt */
- spx5_rmw(BIT(rx->channel_id),
- BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ spx5_rmw(BIT(fdma->channel_id),
+ BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
sparx5, FDMA_INTR_DB_ENA);
/* Activate the RX channel */
- spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_ACTIVATE);
+ spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_ACTIVATE);
}
static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
+ struct fdma *fdma = &rx->fdma;
+
/* Deactivate the RX channel */
- spx5_rmw(0, BIT(rx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ spx5_rmw(0, BIT(fdma->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
sparx5, FDMA_CH_ACTIVATE);
/* Disable RX channel DB interrupt */
- spx5_rmw(0, BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ spx5_rmw(0, BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
sparx5, FDMA_INTR_DB_ENA);
/* Stop RX fdma */
@@ -158,75 +104,55 @@ static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *r
static void sparx5_fdma_tx_activate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
+ struct fdma *fdma = &tx->fdma;
+
/* Write the buffer address in the LLP and LLP1 regs */
- spx5_wr(((u64)tx->dma) & GENMASK(31, 0), sparx5,
- FDMA_DCB_LLP(tx->channel_id));
- spx5_wr(((u64)tx->dma) >> 32, sparx5, FDMA_DCB_LLP1(tx->channel_id));
+ spx5_wr(((u64)fdma->dma) & GENMASK(31, 0), sparx5,
+ FDMA_DCB_LLP(fdma->channel_id));
+ spx5_wr(((u64)fdma->dma) >> 32, sparx5,
+ FDMA_DCB_LLP1(fdma->channel_id));
/* Set the number of TX DBs to be used, and DB end-of-frame interrupt */
- spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
+ spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
FDMA_CH_CFG_CH_INJ_PORT_SET(INJ_QUEUE),
- sparx5, FDMA_CH_CFG(tx->channel_id));
+ sparx5, FDMA_CH_CFG(fdma->channel_id));
/* Start TX fdma */
spx5_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), FDMA_PORT_CTRL_INJ_STOP,
sparx5, FDMA_PORT_CTRL(0));
/* Activate the channel */
- spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_ACTIVATE);
+ spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_ACTIVATE);
}
static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
/* Disable the channel */
- spx5_rmw(0, BIT(tx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ spx5_rmw(0, BIT(tx->fdma.channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
sparx5, FDMA_CH_ACTIVATE);
}
-static void sparx5_fdma_rx_reload(struct sparx5 *sparx5, struct sparx5_rx *rx)
+static void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma)
{
- /* Reload the RX channel */
+ /* Reload the channel */
- spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_RELOAD);
-}
-
-static void sparx5_fdma_tx_reload(struct sparx5 *sparx5, struct sparx5_tx *tx)
-{
- /* Reload the TX channel */
- spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_RELOAD);
-}
-
-static struct sk_buff *sparx5_fdma_rx_alloc_skb(struct sparx5_rx *rx)
-{
- return __netdev_alloc_skb(rx->ndev, FDMA_XTR_BUFFER_SIZE,
- GFP_ATOMIC);
+ spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_RELOAD);
}
static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
- struct sparx5_db_hw *db_hw;
- unsigned int packet_size;
+ struct fdma *fdma = &rx->fdma;
struct sparx5_port *port;
- struct sk_buff *new_skb;
+ struct fdma_db *db_hw;
struct frame_info fi;
struct sk_buff *skb;
- dma_addr_t dma_addr;
/* Check if the DCB is done */
- db_hw = &rx->dcb_entries[rx->dcb_index].db[rx->db_index];
- if (unlikely(!(db_hw->status & FDMA_DCB_STATUS_DONE)))
- return false;
- skb = rx->skb[rx->dcb_index][rx->db_index];
- /* Replace the DB entry with a new SKB */
- new_skb = sparx5_fdma_rx_alloc_skb(rx);
- if (unlikely(!new_skb))
+ db_hw = fdma_db_next_get(fdma);
+ if (unlikely(!fdma_db_is_done(db_hw)))
return false;
- /* Map the new skb data and set the new skb */
- dma_addr = virt_to_phys(new_skb->data);
- rx->skb[rx->dcb_index][rx->db_index] = new_skb;
- db_hw->dataptr = dma_addr;
- packet_size = FDMA_DCB_STATUS_BLOCKL(db_hw->status);
- skb_put(skb, packet_size);
+ skb = rx->skb[fdma->dcb_index][fdma->db_index];
+ skb_put(skb, fdma_db_len_get(db_hw));
/* Now do the normal processing of the skb */
sparx5_ifh_parse((u32 *)skb->data, &fi);
/* Map to port netdev */
@@ -259,84 +185,62 @@ static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
{
struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
+ struct fdma *fdma = &rx->fdma;
int counter = 0;
while (counter < weight && sparx5_fdma_rx_get_frame(sparx5, rx)) {
- struct sparx5_rx_dcb_hw *old_dcb;
-
- rx->db_index++;
+ fdma_db_advance(fdma);
counter++;
/* Check if the DCB can be reused */
- if (rx->db_index != FDMA_RX_DCB_MAX_DBS)
+ if (fdma_dcb_is_reusable(fdma))
continue;
- /* As the DCB can be reused, just advance the dcb_index
- * pointer and set the nextptr in the DCB
- */
- rx->db_index = 0;
- old_dcb = &rx->dcb_entries[rx->dcb_index];
- rx->dcb_index++;
- rx->dcb_index &= FDMA_DCB_MAX - 1;
- sparx5_fdma_rx_add_dcb(rx, old_dcb,
- rx->dma +
- ((unsigned long)old_dcb -
- (unsigned long)rx->dcb_entries));
+ fdma_dcb_add(fdma, fdma->dcb_index,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+ fdma_db_reset(fdma);
+ fdma_dcb_advance(fdma);
}
if (counter < weight) {
napi_complete_done(&rx->napi, counter);
- spx5_rmw(BIT(rx->channel_id),
- BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ spx5_rmw(BIT(fdma->channel_id),
+ BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
sparx5, FDMA_INTR_DB_ENA);
}
if (counter)
- sparx5_fdma_rx_reload(sparx5, rx);
+ sparx5_fdma_reload(sparx5, fdma);
return counter;
}
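
A standalone sketch of the index bookkeeping that fdma_db_advance(),
fdma_dcb_is_reusable(), fdma_db_reset() and fdma_dcb_advance() are expected
to perform, inferred from the open-coded version this hunk replaces; the DB
count of 15 matches the removed DCB diagram (Data0..Data14):

#include <stdio.h>

int main(void)
{
	const int n_dcbs = 64, n_dbs = 15;
	int dcb_index = 0, db_index = 0;

	for (int frames = 0; frames < 17; frames++) {
		db_index++;			/* fdma_db_advance() */
		if (db_index != n_dbs)		/* !fdma_dcb_is_reusable() */
			continue;
		db_index = 0;			/* fdma_db_reset() */
		dcb_index = (dcb_index + 1) & (n_dcbs - 1); /* fdma_dcb_advance() */
	}
	printf("after 17 frames: dcb %d, db %d\n", dcb_index, db_index);
	return 0;
}
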
-static struct sparx5_tx_dcb_hw *sparx5_fdma_next_dcb(struct sparx5_tx *tx,
- struct sparx5_tx_dcb_hw *dcb)
-{
- struct sparx5_tx_dcb_hw *next_dcb;
-
- next_dcb = dcb;
- next_dcb++;
- /* Handle wrap-around */
- if ((unsigned long)next_dcb >=
- ((unsigned long)tx->first_entry + FDMA_DCB_MAX * sizeof(*dcb)))
- next_dcb = tx->first_entry;
- return next_dcb;
-}
-
int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
{
- struct sparx5_tx_dcb_hw *next_dcb_hw;
struct sparx5_tx *tx = &sparx5->tx;
+ struct fdma *fdma = &tx->fdma;
static bool first_time = true;
- struct sparx5_db_hw *db_hw;
- struct sparx5_db *db;
+ void *virt_addr;
- next_dcb_hw = sparx5_fdma_next_dcb(tx, tx->curr_entry);
- db_hw = &next_dcb_hw->db[0];
- if (!(db_hw->status & FDMA_DCB_STATUS_DONE))
+ fdma_dcb_advance(fdma);
+ if (!fdma_db_is_done(fdma_db_get(fdma, fdma->dcb_index, 0)))
return -EINVAL;
- db = list_first_entry(&tx->db_list, struct sparx5_db, list);
- list_move_tail(&db->list, &tx->db_list);
- next_dcb_hw->nextptr = FDMA_DCB_INVALID_DATA;
- tx->curr_entry->nextptr = tx->dma +
- ((unsigned long)next_dcb_hw -
- (unsigned long)tx->first_entry);
- tx->curr_entry = next_dcb_hw;
- memset(db->cpu_addr, 0, FDMA_XTR_BUFFER_SIZE);
- memcpy(db->cpu_addr, ifh, IFH_LEN * 4);
- memcpy(db->cpu_addr + IFH_LEN * 4, skb->data, skb->len);
- db_hw->status = FDMA_DCB_STATUS_SOF |
- FDMA_DCB_STATUS_EOF |
- FDMA_DCB_STATUS_BLOCKO(0) |
- FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4);
+
+ /* Get the virtual address of the dataptr for the next DB */
+ virt_addr = ((u8 *)fdma->dcbs +
+ (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
+ ((fdma->dcb_index * fdma->n_dbs) * fdma->db_size));
+
+ memcpy(virt_addr, ifh, IFH_LEN * 4);
+ memcpy(virt_addr + IFH_LEN * 4, skb->data, skb->len);
+
+ fdma_dcb_add(fdma, fdma->dcb_index, 0,
+ FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4));
+
if (first_time) {
sparx5_fdma_tx_activate(sparx5, tx);
first_time = false;
} else {
- sparx5_fdma_tx_reload(sparx5, tx);
+ sparx5_fdma_reload(sparx5, fdma);
}
return NETDEV_TX_OK;
}
@@ -344,43 +248,16 @@ int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
{
struct sparx5_rx *rx = &sparx5->rx;
- struct sparx5_rx_dcb_hw *dcb;
- int idx, jdx;
- int size;
-
- size = sizeof(struct sparx5_rx_dcb_hw) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- rx->dcb_entries = devm_kzalloc(sparx5->dev, size, GFP_KERNEL);
- if (!rx->dcb_entries)
- return -ENOMEM;
- rx->dma = virt_to_phys(rx->dcb_entries);
- rx->last_entry = rx->dcb_entries;
- rx->db_index = 0;
- rx->dcb_index = 0;
- /* Now for each dcb allocate the db */
- for (idx = 0; idx < FDMA_DCB_MAX; ++idx) {
- dcb = &rx->dcb_entries[idx];
- dcb->info = 0;
- /* For each db allocate an skb and map skb data pointer to the DB
- * dataptr. In this way when the frame is received the skb->data
- * will contain the frame, so no memcpy is needed
- */
- for (jdx = 0; jdx < FDMA_RX_DCB_MAX_DBS; ++jdx) {
- struct sparx5_db_hw *db_hw = &dcb->db[jdx];
- dma_addr_t dma_addr;
- struct sk_buff *skb;
-
- skb = sparx5_fdma_rx_alloc_skb(rx);
- if (!skb)
- return -ENOMEM;
-
- dma_addr = virt_to_phys(skb->data);
- db_hw->dataptr = dma_addr;
- db_hw->status = 0;
- rx->skb[idx][jdx] = skb;
- }
- sparx5_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * idx);
- }
+ struct fdma *fdma = &rx->fdma;
+ int err;
+
+ err = fdma_alloc_phys(fdma);
+ if (err)
+ return err;
+
+ fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+
netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback,
FDMA_WEIGHT);
napi_enable(&rx->napi);
@@ -391,57 +268,33 @@ static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5)
{
struct sparx5_tx *tx = &sparx5->tx;
- struct sparx5_tx_dcb_hw *dcb;
- int idx, jdx;
- int size;
-
- size = sizeof(struct sparx5_tx_dcb_hw) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- tx->curr_entry = devm_kzalloc(sparx5->dev, size, GFP_KERNEL);
- if (!tx->curr_entry)
- return -ENOMEM;
- tx->dma = virt_to_phys(tx->curr_entry);
- tx->first_entry = tx->curr_entry;
- INIT_LIST_HEAD(&tx->db_list);
- /* Now for each dcb allocate the db */
- for (idx = 0; idx < FDMA_DCB_MAX; ++idx) {
- dcb = &tx->curr_entry[idx];
- dcb->info = 0;
- /* TX databuffers must be 16byte aligned */
- for (jdx = 0; jdx < FDMA_TX_DCB_MAX_DBS; ++jdx) {
- struct sparx5_db_hw *db_hw = &dcb->db[jdx];
- struct sparx5_db *db;
- dma_addr_t phys;
- void *cpu_addr;
-
- cpu_addr = devm_kzalloc(sparx5->dev,
- FDMA_XTR_BUFFER_SIZE,
- GFP_KERNEL);
- if (!cpu_addr)
- return -ENOMEM;
- phys = virt_to_phys(cpu_addr);
- db_hw->dataptr = phys;
- db_hw->status = 0;
- db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL);
- if (!db)
- return -ENOMEM;
- db->cpu_addr = cpu_addr;
- list_add_tail(&db->list, &tx->db_list);
- }
- sparx5_fdma_tx_add_dcb(tx, dcb, tx->dma + sizeof(*dcb) * idx);
- /* Let the curr_entry to point to the last allocated entry */
- if (idx == FDMA_DCB_MAX - 1)
- tx->curr_entry = dcb;
- }
+ struct fdma *fdma = &tx->fdma;
+ int err;
+
+ err = fdma_alloc_phys(fdma);
+ if (err)
+ return err;
+
+ fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_DONE);
+
return 0;
}
static void sparx5_fdma_rx_init(struct sparx5 *sparx5,
struct sparx5_rx *rx, int channel)
{
+ struct fdma *fdma = &rx->fdma;
int idx;
- rx->channel_id = channel;
+ fdma->channel_id = channel;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = FDMA_RX_DCB_MAX_DBS;
+ fdma->priv = sparx5;
+ fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE);
+ fdma->size = fdma_get_size(&sparx5->rx.fdma);
+ fdma->ops.dataptr_cb = &sparx5_fdma_rx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
/* Fetch a netdev for SKB and NAPI use, any will do */
for (idx = 0; idx < SPX5_PORTS; ++idx) {
struct sparx5_port *port = sparx5->ports[idx];
@@ -456,7 +309,16 @@ static void sparx5_fdma_rx_init(struct sparx5 *sparx5,
static void sparx5_fdma_tx_init(struct sparx5 *sparx5,
struct sparx5_tx *tx, int channel)
{
- tx->channel_id = channel;
+ struct fdma *fdma = &tx->fdma;
+
+ fdma->channel_id = channel;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = FDMA_TX_DCB_MAX_DBS;
+ fdma->priv = sparx5;
+ fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE);
+ fdma->size = fdma_get_size_contiguous(&sparx5->tx.fdma);
+ fdma->ops.dataptr_cb = &sparx5_fdma_tx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
}
irqreturn_t sparx5_fdma_handler(int irq, void *args)
@@ -594,5 +456,7 @@ int sparx5_fdma_stop(struct sparx5 *sparx5)
read_poll_timeout(sparx5_fdma_port_ctrl, val,
FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0,
500, 10000, 0, sparx5);
+ fdma_free_phys(&sparx5->rx.fdma);
+ fdma_free_phys(&sparx5->tx.fdma);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 1982ae03b4fe..3309060b1e4c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -20,6 +20,8 @@
#include <linux/debugfs.h>
#include <net/flow_offload.h>
+#include <fdma_api.h>
+
#include "sparx5_main_regs.h"
/* Target chip type */
@@ -100,23 +102,6 @@ enum sparx5_vlan_port_type {
struct sparx5;
-struct sparx5_db_hw {
- u64 dataptr;
- u64 status;
-};
-
-struct sparx5_rx_dcb_hw {
- u64 nextptr;
- u64 info;
- struct sparx5_db_hw db[FDMA_RX_DCB_MAX_DBS];
-};
-
-struct sparx5_tx_dcb_hw {
- u64 nextptr;
- u64 info;
- struct sparx5_db_hw db[FDMA_TX_DCB_MAX_DBS];
-};
-
/* Frame DMA receive state:
* For each DB, there is a SKB, and the skb data pointer is mapped in
* the DB. Once a frame is received the skb is given to the upper layers
@@ -124,14 +109,10 @@ struct sparx5_tx_dcb_hw {
* When the db_index reached FDMA_RX_DCB_MAX_DBS the DB is reused.
*/
struct sparx5_rx {
- struct sparx5_rx_dcb_hw *dcb_entries;
- struct sparx5_rx_dcb_hw *last_entry;
+ struct fdma fdma;
struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
- int db_index;
- int dcb_index;
dma_addr_t dma;
struct napi_struct napi;
- u32 channel_id;
struct net_device *ndev;
u64 packets;
};
@@ -140,11 +121,7 @@ struct sparx5_rx {
* DCBs are chained using the DCBs nextptr field.
*/
struct sparx5_tx {
- struct sparx5_tx_dcb_hw *curr_entry;
- struct sparx5_tx_dcb_hw *first_entry;
- struct list_head db_list;
- dma_addr_t dma;
- u32 channel_id;
+ struct fdma fdma;
u64 packets;
u64 dropped;
};
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
index 15db423be4aa..459a53676ae9 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
@@ -31,10 +31,10 @@ static u64 sparx5_mirror_port_get(struct sparx5 *sparx5, u32 idx)
/* Add port to mirror (only front ports) */
static void sparx5_mirror_port_add(struct sparx5 *sparx5, u32 idx, u32 portno)
{
- u32 val, reg = portno;
+ u64 reg = portno;
+ u32 val;
- reg = portno / BITS_PER_BYTE;
- val = BIT(portno % BITS_PER_BYTE);
+ val = BIT(do_div(reg, 32));
if (reg == 0)
return spx5_rmw(val, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx));
@@ -45,10 +45,10 @@ static void sparx5_mirror_port_add(struct sparx5 *sparx5, u32 idx, u32 portno)
/* Delete port from mirror (only front ports) */
static void sparx5_mirror_port_del(struct sparx5 *sparx5, u32 idx, u32 portno)
{
- u32 val, reg = portno;
+ u64 reg = portno;
+ u32 val;
- reg = portno / BITS_PER_BYTE;
- val = BIT(portno % BITS_PER_BYTE);
+ val = BIT(do_div(reg, 32));
if (reg == 0)
return spx5_rmw(0, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx));
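
The fix in these two hunks corrects the register stride: the per-port probe
mask is spread across 32-bit registers, so the register index is portno / 32
and the bit is portno % 32, where the removed code divided by BITS_PER_BYTE.
do_div() divides its 64-bit argument in place and returns the remainder,
mimicked here in a standalone analog:

#include <stdint.h>
#include <stdio.h>

static uint32_t do_div_sim(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

int main(void)
{
	for (uint64_t portno = 30; portno <= 34; portno += 2) {
		uint64_t reg = portno;
		uint32_t bit = do_div_sim(&reg, 32);

		printf("port %2llu -> register %llu, bit %u\n",
		       (unsigned long long)portno,
		       (unsigned long long)reg, bit);
	}
	return 0;
}
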
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index f3f5fb420468..70427643f777 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -45,8 +45,12 @@ void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
fwd = (fwd >> 5);
info->src_port = FIELD_GET(GENMASK(7, 1), fwd);
+ /* Bits 270-271 are occasionally set unexpectedly by the hardware;
+ * clear them before extracting the timestamp.
+ */
info->timestamp =
- ((u64)xtr_hdr[2] << 24) |
+ ((u64)(xtr_hdr[2] & GENMASK(5, 0)) << 24) |
((u64)xtr_hdr[3] << 16) |
((u64)xtr_hdr[4] << 8) |
((u64)xtr_hdr[5] << 0);
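
A standalone check of the masking above: the timestamp starts in xtr_hdr[2],
whose top two bits (frame bits 270-271) may be spuriously set, so only the
low six bits of that byte (GENMASK(5, 0) == 0x3f) contribute:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t xtr_hdr[6] = { 0, 0, 0xfb, 0x12, 0x34, 0x56 };
	uint64_t ts = ((uint64_t)(xtr_hdr[2] & 0x3f) << 24) |
		      ((uint64_t)xtr_hdr[3] << 16) |
		      ((uint64_t)xtr_hdr[4] << 8) |
		      ((uint64_t)xtr_hdr[5] << 0);

	printf("timestamp = 0x%llx (0xfb masked to 0x3b)\n",
	       (unsigned long long)ts);
	return 0;
}
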
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
index 51d9423b08a6..7251121ab196 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
@@ -1442,18 +1442,10 @@ static void vcap_api_encode_rule_test(struct kunit *test)
vcap_enable_lookups(&test_vctrl, &test_netdev, 0, 0,
rule->cookie, false);
- vcap_free_rule(rule);
-
- /* Check that the rule has been freed: tricky to access since this
- * memory should not be accessible anymore
- */
- KUNIT_EXPECT_PTR_NE(test, NULL, rule);
- ret = list_empty(&rule->keyfields);
- KUNIT_EXPECT_EQ(test, true, ret);
- ret = list_empty(&rule->actionfields);
- KUNIT_EXPECT_EQ(test, true, ret);
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, id);
+ KUNIT_EXPECT_EQ(test, 0, ret);
- vcap_del_rule(&test_vctrl, &test_netdev, id);
+ vcap_free_rule(rule);
}
static void vcap_api_set_rule_counter_test(struct kunit *test)
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index ddb8f68d80a2..ca4ed58f1206 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1496,11 +1496,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto release_region;
- err = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
- if (err) {
- dev_err(&pdev->dev, "Failed to set dma device segment size\n");
- goto release_region;
- }
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
err = -ENOMEM;
gc = vzalloc(sizeof(*gc));
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index cafded2f9382..a00f915c5188 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -52,9 +52,33 @@ static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
return 0;
}
+static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
+ struct hwc_work_request *req)
+{
+ struct device *dev = hwc_rxq->hwc->dev;
+ struct gdma_sge *sge;
+ int err;
+
+ sge = &req->sge;
+ sge->address = (u64)req->buf_sge_addr;
+ sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
+ sge->size = req->buf_len;
+
+ memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
+ req->wqe_req.sgl = sge;
+ req->wqe_req.num_sge = 1;
+ req->wqe_req.client_data_unit = 0;
+
+ err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
+ if (err)
+ dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
+ return err;
+}
+
static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
- const struct gdma_resp_hdr *resp_msg)
+ struct hwc_work_request *rx_req)
{
+ const struct gdma_resp_hdr *resp_msg = rx_req->buf_va;
struct hwc_caller_ctx *ctx;
int err;
@@ -62,6 +86,7 @@ static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
hwc->inflight_msg_res.map)) {
dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
resp_msg->response.hwc_msg_id);
+ mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
return;
}
@@ -75,30 +100,13 @@ static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
memcpy(ctx->output_buf, resp_msg, resp_len);
out:
ctx->error = err;
- complete(&ctx->comp_event);
-}
-
-static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
- struct hwc_work_request *req)
-{
- struct device *dev = hwc_rxq->hwc->dev;
- struct gdma_sge *sge;
- int err;
-
- sge = &req->sge;
- sge->address = (u64)req->buf_sge_addr;
- sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
- sge->size = req->buf_len;
- memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
- req->wqe_req.sgl = sge;
- req->wqe_req.num_sge = 1;
- req->wqe_req.client_data_unit = 0;
+ /* Must post the rx wqe before complete(), otherwise the next rx may
+ * hit a no_wqe error.
+ */
+ mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
- err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
- if (err)
- dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
- return err;
+ complete(&ctx->comp_event);
}
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
@@ -235,14 +243,12 @@ static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
return;
}
- mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);
+ mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req);
- /* Do no longer use 'resp', because the buffer is posted to the HW
- * in the below mana_hwc_post_rx_wqe().
+ /* Can no longer use 'resp', because the buffer is posted to the HW
+ * in mana_hwc_handle_resp() above.
*/
resp = NULL;
-
- mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
}
static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
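The ordering enforced by this hunk can be read as a two-step invariant: replenish the receive queue, then wake the waiter. A deliberately tiny userspace model (the queue is just a counter; the names are illustrative, not from the driver):

#include <stdio.h>

static int rx_wqes_available;

/* Model of mana_hwc_post_rx_wqe(): hand the buffer back to the "HW" */
static void post_rx_wqe(void)
{
	rx_wqes_available++;
}

/* Model of complete(): after this, the waiter may send its next
 * request, whose response needs an available receive WQE.
 */
static void complete_waiter(void)
{
	printf("waiter woken; rx WQEs available: %d\n", rx_wqes_available);
}

int main(void)
{
	rx_wqes_available = 0;	/* consumed by the response just handled */
	post_rx_wqe();		/* step 1: repost before waking */
	complete_waiter();	/* step 2: the next response has a buffer */
	return 0;
}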
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index d2f07e179e86..c47266d1c7c2 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -511,7 +511,7 @@ static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
}
/* Release pre-allocated RX buffers */
-static void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
+void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
{
struct device *dev;
int i;
@@ -599,12 +599,16 @@ static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
else
*headroom = XDP_PACKET_HEADROOM;
- *alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
+ *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
+
+ /* Using page pool in this case, so alloc_size is PAGE_SIZE */
+ if (*alloc_size < PAGE_SIZE)
+ *alloc_size = PAGE_SIZE;
*datasize = mtu + ETH_HLEN;
}
-static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
+int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_queues)
{
struct device *dev;
struct page *page;
@@ -618,7 +622,7 @@ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
dev = mpc->ac->gdma_dev->gdma_context->dev;
- num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE;
+ num_rxb = num_queues * mpc->rx_queue_size;
WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
@@ -678,7 +682,7 @@ static int mana_change_mtu(struct net_device *ndev, int new_mtu)
int err;
/* Pre-allocate buffers to prevent failure in mana_attach later */
- err = mana_pre_alloc_rxbufs(mpc, new_mtu);
+ err = mana_pre_alloc_rxbufs(mpc, new_mtu, mpc->num_queues);
if (err) {
netdev_err(ndev, "Insufficient memory for new MTU\n");
return err;
@@ -1788,7 +1792,6 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
{
struct mana_cq *cq = context;
- u8 arm_bit;
int w;
WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
@@ -1799,16 +1802,23 @@ static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
mana_poll_tx_cq(cq);
w = cq->work_done;
-
- if (w < cq->budget &&
- napi_complete_done(&cq->napi, w)) {
- arm_bit = SET_ARM_BIT;
- } else {
- arm_bit = 0;
+ cq->work_done_since_doorbell += w;
+
+ if (w < cq->budget) {
+ mana_gd_ring_cq(gdma_queue, SET_ARM_BIT);
+ cq->work_done_since_doorbell = 0;
+ napi_complete_done(&cq->napi, w);
+ } else if (cq->work_done_since_doorbell >
+ cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) {
+ /* MANA hardware requires at least one doorbell ring every 8
+ * wraparounds of CQ even if there is no need to arm the CQ.
+ * This driver rings the doorbell as soon as we have exceeded
+ * 4 wraparounds.
+ */
+ mana_gd_ring_cq(gdma_queue, 0);
+ cq->work_done_since_doorbell = 0;
}
- mana_gd_ring_cq(gdma_queue, arm_bit);
-
return w;
}
@@ -1862,10 +1872,12 @@ static void mana_destroy_txq(struct mana_port_context *apc)
for (i = 0; i < apc->num_queues; i++) {
napi = &apc->tx_qp[i].tx_cq.napi;
- napi_synchronize(napi);
- napi_disable(napi);
- netif_napi_del(napi);
-
+ if (apc->tx_qp[i].txq.napi_initialized) {
+ napi_synchronize(napi);
+ napi_disable(napi);
+ netif_napi_del(napi);
+ apc->tx_qp[i].txq.napi_initialized = false;
+ }
mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
@@ -1899,15 +1911,17 @@ static int mana_create_txq(struct mana_port_context *apc,
return -ENOMEM;
/* The minimum size of the WQE is 32 bytes, hence
- * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
+ * apc->tx_queue_size represents the maximum number of WQEs
* the SQ can store. This value is then used to size other queues
* to prevent overflow.
+ * Also note that txq_size is always MANA_PAGE_ALIGNED: the minimum
+ * value of apc->tx_queue_size is 128, which makes txq_size
+ * 128 * 32 = 4096, and all higher values of apc->tx_queue_size are
+ * powers of two.
*/
- txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
- BUILD_BUG_ON(!MANA_PAGE_ALIGNED(txq_size));
+ txq_size = apc->tx_queue_size * 32;
- cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
- cq_size = MANA_PAGE_ALIGN(cq_size);
+ cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE;
gc = gd->gdma_context;
@@ -1921,6 +1935,7 @@ static int mana_create_txq(struct mana_port_context *apc,
txq->ndev = net;
txq->net_txq = netdev_get_tx_queue(net, i);
txq->vp_offset = apc->tx_vp_offset;
+ txq->napi_initialized = false;
skb_queue_head_init(&txq->pending_skbs);
memset(&spec, 0, sizeof(spec));
@@ -1987,6 +2002,7 @@ static int mana_create_txq(struct mana_port_context *apc,
netif_napi_add_tx(net, &cq->napi, mana_poll);
napi_enable(&cq->napi);
+ txq->napi_initialized = true;
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
}
@@ -1998,7 +2014,7 @@ out:
}
static void mana_destroy_rxq(struct mana_port_context *apc,
- struct mana_rxq *rxq, bool validate_state)
+ struct mana_rxq *rxq, bool napi_initialized)
{
struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
@@ -2013,15 +2029,15 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
napi = &rxq->rx_cq.napi;
- if (validate_state)
+ if (napi_initialized) {
napi_synchronize(napi);
- napi_disable(napi);
+ napi_disable(napi);
+ netif_napi_del(napi);
+ }
xdp_rxq_info_unreg(&rxq->xdp_rxq);
- netif_napi_del(napi);
-
mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
mana_deinit_cq(apc, &rxq->rx_cq);
@@ -2145,10 +2161,11 @@ static int mana_push_wqe(struct mana_rxq *rxq)
static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
{
+ struct mana_port_context *mpc = netdev_priv(rxq->ndev);
struct page_pool_params pprm = {};
int ret;
- pprm.pool_size = RX_BUFFERS_PER_QUEUE;
+ pprm.pool_size = mpc->rx_queue_size;
pprm.nid = gc->numa_node;
pprm.napi = &rxq->rx_cq.napi;
pprm.netdev = rxq->ndev;
@@ -2180,13 +2197,13 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
gc = gd->gdma_context;
- rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
+ rxq = kzalloc(struct_size(rxq, rx_oobs, apc->rx_queue_size),
GFP_KERNEL);
if (!rxq)
return NULL;
rxq->ndev = ndev;
- rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
+ rxq->num_rx_buf = apc->rx_queue_size;
rxq->rxq_idx = rxq_idx;
rxq->rxobj = INVALID_MANA_HANDLE;
@@ -2734,6 +2751,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
apc->ndev = ndev;
apc->max_queues = gc->max_num_queues;
apc->num_queues = gc->max_num_queues;
+ apc->tx_queue_size = DEF_TX_BUFFERS_PER_QUEUE;
+ apc->rx_queue_size = DEF_RX_BUFFERS_PER_QUEUE;
apc->port_handle = INVALID_MANA_HANDLE;
apc->pf_filter_handle = INVALID_MANA_HANDLE;
apc->port_idx = port_idx;
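The doorbell threshold in mana_cq_handler() above is worth unpacking: queue_size / COMP_ENTRY_SIZE is the number of completion entries per CQ wraparound, so the comparison fires once more than four wraparounds' worth of completions have accumulated, comfortably within the hardware's eight-wraparound limit. A sketch of the arithmetic (the 4096-byte queue size is a made-up example, and the 64-byte COMP_ENTRY_SIZE is an assumption):

#include <stdio.h>

#define COMP_ENTRY_SIZE 64	/* assumed size of one completion entry */

int main(void)
{
	unsigned int queue_size = 4096;	/* hypothetical CQ size in bytes */
	unsigned int per_wrap = queue_size / COMP_ENTRY_SIZE;

	printf("ring unarmed doorbell after > %u completions (4 wraps)\n",
	       per_wrap * 4);
	return 0;
}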
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index 146d5db1792f..dc3864377538 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -345,27 +345,101 @@ static int mana_set_channels(struct net_device *ndev,
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int new_count = channels->combined_count;
unsigned int old_count = apc->num_queues;
- int err, err2;
+ int err;
+
+ err = mana_pre_alloc_rxbufs(apc, ndev->mtu, new_count);
+ if (err) {
+ netdev_err(ndev, "Insufficient memory for new allocations");
+ return err;
+ }
err = mana_detach(ndev, false);
if (err) {
netdev_err(ndev, "mana_detach failed: %d\n", err);
- return err;
+ goto out;
}
apc->num_queues = new_count;
err = mana_attach(ndev);
- if (!err)
- return 0;
+ if (err) {
+ apc->num_queues = old_count;
+ netdev_err(ndev, "mana_attach failed: %d\n", err);
+ }
+
+out:
+ mana_pre_dealloc_rxbufs(apc);
+ return err;
+}
+
+static void mana_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+
+ ring->rx_pending = apc->rx_queue_size;
+ ring->tx_pending = apc->tx_queue_size;
+ ring->rx_max_pending = MAX_RX_BUFFERS_PER_QUEUE;
+ ring->tx_max_pending = MAX_TX_BUFFERS_PER_QUEUE;
+}
+
+static int mana_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ u32 new_tx, new_rx;
+ u32 old_tx, old_rx;
+ int err;
- netdev_err(ndev, "mana_attach failed: %d\n", err);
+ old_tx = apc->tx_queue_size;
+ old_rx = apc->rx_queue_size;
- /* Try to roll it back to the old configuration. */
- apc->num_queues = old_count;
- err2 = mana_attach(ndev);
- if (err2)
- netdev_err(ndev, "mana re-attach failed: %d\n", err2);
+ if (ring->tx_pending < MIN_TX_BUFFERS_PER_QUEUE) {
+ NL_SET_ERR_MSG_FMT(extack, "tx:%d less than the min:%d", ring->tx_pending,
+ MIN_TX_BUFFERS_PER_QUEUE);
+ return -EINVAL;
+ }
+
+ if (ring->rx_pending < MIN_RX_BUFFERS_PER_QUEUE) {
+ NL_SET_ERR_MSG_FMT(extack, "rx:%d less than the min:%d", ring->rx_pending,
+ MIN_RX_BUFFERS_PER_QUEUE);
+ return -EINVAL;
+ }
+
+ new_rx = roundup_pow_of_two(ring->rx_pending);
+ new_tx = roundup_pow_of_two(ring->tx_pending);
+ netdev_info(ndev, "Using nearest power of 2 values for Txq:%d Rxq:%d\n",
+ new_tx, new_rx);
+
+ /* pre-allocating new buffers to prevent failures in mana_attach() later */
+ apc->rx_queue_size = new_rx;
+ err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
+ apc->rx_queue_size = old_rx;
+ if (err) {
+ netdev_err(ndev, "Insufficient memory for new allocations\n");
+ return err;
+ }
+ err = mana_detach(ndev, false);
+ if (err) {
+ netdev_err(ndev, "mana_detach failed: %d\n", err);
+ goto out;
+ }
+
+ apc->tx_queue_size = new_tx;
+ apc->rx_queue_size = new_rx;
+
+ err = mana_attach(ndev);
+ if (err) {
+ netdev_err(ndev, "mana_attach failed: %d\n", err);
+ apc->tx_queue_size = old_tx;
+ apc->rx_queue_size = old_rx;
+ }
+out:
+ mana_pre_dealloc_rxbufs(apc);
return err;
}
@@ -380,4 +454,6 @@ const struct ethtool_ops mana_ethtool_ops = {
.set_rxfh = mana_set_rxfh,
.get_channels = mana_get_channels,
.set_channels = mana_set_channels,
+ .get_ringparam = mana_get_ringparam,
+ .set_ringparam = mana_set_ringparam,
};
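Note how mana_set_ringparam() silently rounds a request up to the next power of two (and reports the value actually used via netdev_info()). A userspace model of the effective ring size, assuming only the MIN/MAX bounds checked above:

#include <stdio.h>

/* Model of the kernel's roundup_pow_of_two() for the values used here */
static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int requested_rx = 900;

	/* e.g. "ethtool -G <iface> rx 900" ends up using 1024 */
	printf("rx ring used: %u\n", roundup_pow2(requested_rx));
	return 0;
}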
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index ed2fb44500b0..3d72aa7b1305 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -453,9 +453,158 @@ static u16 ocelot_vlan_unaware_pvid(struct ocelot *ocelot,
return VLAN_N_VID - bridge_num - 1;
}
+/**
+ * ocelot_update_vlan_reclassify_rule() - Make switch aware only to bridge VLAN TPID
+ *
+ * @ocelot: Switch private data structure
+ * @port: Index of ingress port
+ *
+ * IEEE 802.1Q-2018 clauses "5.5 C-VLAN component conformance" and "5.6 S-VLAN
+ * component conformance" suggest that a C-VLAN component should only recognize
+ * and filter on C-Tags, and an S-VLAN component should only recognize and
+ * filter on S-Tags.
+ *
+ * In Linux, as per commit 1a0b20b25732 ("Merge branch 'bridge-next'"), C-VLAN
+ * components are largely represented by a bridge with vlan_protocol 802.1Q,
+ * and S-VLAN components by a bridge with vlan_protocol 802.1ad.
+ *
+ * Currently the driver only offloads vlan_protocol 802.1Q, but the hardware
+ * design is non-conformant, because the switch assigns each frame to a VLAN
+ * based on an entirely different question, as detailed in figure "Basic VLAN
+ * Classification Flow" from its manual and reproduced below.
+ *
+ * Set TAG_TYPE, PCP, DEI, VID to port-default values in VLAN_CFG register
+ * if VLAN_AWARE_ENA[port] and frame has outer tag then:
+ * if VLAN_INNER_TAG_ENA[port] and frame has inner tag then:
+ * TAG_TYPE = (Frame.InnerTPID <> 0x8100)
+ * Set PCP, DEI, VID to values from inner VLAN header
+ * else:
+ * TAG_TYPE = (Frame.OuterTPID <> 0x8100)
+ * Set PCP, DEI, VID to values from outer VLAN header
+ * if VID == 0 then:
+ * VID = VLAN_CFG.VLAN_VID
+ *
+ * Summarized, the switch will recognize both 802.1Q and 802.1ad TPIDs as VLAN
+ * "with equal rights", and just set the TAG_TYPE bit to 0 (if 802.1Q) or to 1
+ * (if 802.1ad). It will classify based on whichever of the tags is "outer", no
+ * matter what TPID that may have (or "inner", if VLAN_INNER_TAG_ENA[port]).
+ *
+ * In the VLAN Table, the TAG_TYPE information is not accessible - just the
+ * classified VID is - so it is as if each VLAN Table entry is for 2 VLANs:
+ * C-VLAN X, and S-VLAN X.
+ *
+ * Whereas the Linux bridge behavior is to only filter on frames with a TPID
+ * equal to the vlan_protocol, and treat everything else as VLAN-untagged.
+ *
+ * Consider an ingress packet tagged with 802.1ad VID=3 and 802.1Q VID=5,
+ * received on a bridge vlan_filtering=1 vlan_protocol=802.1Q port. This frame
+ * should be treated as 802.1Q-untagged, and classified to the PVID of that
+ * bridge port. Not to VID=3, and not to VID=5.
+ *
+ * The VCAP IS1 TCAM has everything we need to overwrite the choices made in
+ * the basic VLAN classification pipeline: it can match on TAG_TYPE in the key,
+ * and it can modify the classified VID in the action. Thus, for each port
+ * under a vlan_filtering bridge, we can insert a rule in VCAP IS1 lookup 0 to
+ * match on 802.1ad tagged frames and modify their classified VID to the 802.1Q
+ * PVID of the port. This effectively makes it appear to the outside world as
+ * if those packets were processed as VLAN-untagged.
+ *
+ * The rule needs to be updated each time the bridge PVID changes, and needs
+ * to be deleted if the bridge PVID is deleted, or if the port becomes
+ * VLAN-unaware.
+ */
+static int ocelot_update_vlan_reclassify_rule(struct ocelot *ocelot, int port)
+{
+ unsigned long cookie = OCELOT_VCAP_IS1_VLAN_RECLASSIFY(ocelot, port);
+ struct ocelot_vcap_block *block_vcap_is1 = &ocelot->block[VCAP_IS1];
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ const struct ocelot_bridge_vlan *pvid_vlan;
+ struct ocelot_vcap_filter *filter;
+ int err, val, pcp, dei;
+ bool vid_replace_ena;
+ u16 vid;
+
+ pvid_vlan = ocelot_port->pvid_vlan;
+ vid_replace_ena = ocelot_port->vlan_aware && pvid_vlan;
+
+ filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, cookie,
+ false);
+ if (!vid_replace_ena) {
+ /* If the reclassification filter doesn't need to exist, delete
+ * it if it was previously installed, and exit doing nothing
+ * otherwise.
+ */
+ if (filter)
+ return ocelot_vcap_filter_del(ocelot, filter);
+
+ return 0;
+ }
+
+ /* The reclassification rule must apply. See if it already exists
+ * or if it must be created.
+ */
+
+ /* Treating as VLAN-untagged means using a classified VID equal to
+ * the bridge PVID, and PCP/DEI set to the port default QoS values.
+ */
+ vid = pvid_vlan->vid;
+ val = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
+ pcp = ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(val);
+ dei = !!(val & ANA_PORT_QOS_CFG_DP_DEFAULT_VAL);
+
+ if (filter) {
+ bool changed = false;
+
+ /* Filter exists, just update it */
+ if (filter->action.vid != vid) {
+ filter->action.vid = vid;
+ changed = true;
+ }
+ if (filter->action.pcp != pcp) {
+ filter->action.pcp = pcp;
+ changed = true;
+ }
+ if (filter->action.dei != dei) {
+ filter->action.dei = dei;
+ changed = true;
+ }
+
+ if (!changed)
+ return 0;
+
+ return ocelot_vcap_filter_replace(ocelot, filter);
+ }
+
+ /* Filter doesn't exist, create it */
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return -ENOMEM;
+
+ filter->key_type = OCELOT_VCAP_KEY_ANY;
+ filter->ingress_port_mask = BIT(port);
+ filter->vlan.tpid = OCELOT_VCAP_BIT_1;
+ filter->prio = 1;
+ filter->id.cookie = cookie;
+ filter->id.tc_offload = false;
+ filter->block_id = VCAP_IS1;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ filter->lookup = 0;
+ filter->action.vid_replace_ena = true;
+ filter->action.pcp_dei_ena = true;
+ filter->action.vid = vid;
+ filter->action.pcp = pcp;
+ filter->action.dei = dei;
+
+ err = ocelot_vcap_filter_add(ocelot, filter, NULL);
+ if (err)
+ kfree(filter);
+
+ return err;
+}
+
/* Default vlan to classify for untagged frames (may be zero) */
-static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
- const struct ocelot_bridge_vlan *pvid_vlan)
+static int ocelot_port_set_pvid(struct ocelot *ocelot, int port,
+ const struct ocelot_bridge_vlan *pvid_vlan)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge);
@@ -475,15 +624,23 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
* happens automatically), but also 802.1p traffic which gets
* classified to VLAN 0, but that is always in our RX filter, so it
* would get accepted were it not for this setting.
+ *
+ * Also, we only support the bridge 802.1Q VLAN protocol, so
+ * 802.1ad-tagged frames (carrying S-Tags) should be considered
+ * 802.1Q-untagged, and also dropped.
*/
if (!pvid_vlan && ocelot_port->vlan_aware)
val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
- ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA;
ocelot_rmw_gix(ocelot, val,
ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
- ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA,
ANA_PORT_DROP_CFG, port);
+
+ return ocelot_update_vlan_reclassify_rule(ocelot, port);
}
static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot,
@@ -631,7 +788,10 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
ANA_PORT_VLAN_CFG, port);
- ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
+ err = ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
+ if (err)
+ return err;
+
ocelot_port_manage_port_tag(ocelot, port);
return 0;
@@ -684,9 +844,12 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
return err;
/* Default ingress vlan classification */
- if (pvid)
- ocelot_port_set_pvid(ocelot, port,
- ocelot_bridge_vlan_find(ocelot, vid));
+ if (pvid) {
+ err = ocelot_port_set_pvid(ocelot, port,
+ ocelot_bridge_vlan_find(ocelot, vid));
+ if (err)
+ return err;
+ }
/* Untagged egress vlan classification */
ocelot_port_manage_port_tag(ocelot, port);
@@ -712,8 +875,11 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
return err;
/* Ingress */
- if (del_pvid)
- ocelot_port_set_pvid(ocelot, port, NULL);
+ if (del_pvid) {
+ err = ocelot_port_set_pvid(ocelot, port, NULL);
+ if (err)
+ return err;
+ }
/* Egress */
ocelot_port_manage_port_tag(ocelot, port);
@@ -1099,6 +1265,48 @@ void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
}
EXPORT_SYMBOL(ocelot_ptp_rx_timestamp);
+void ocelot_lock_inj_grp(struct ocelot *ocelot, int grp)
+ __acquires(&ocelot->inj_lock)
+{
+ spin_lock(&ocelot->inj_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_lock_inj_grp);
+
+void ocelot_unlock_inj_grp(struct ocelot *ocelot, int grp)
+ __releases(&ocelot->inj_lock)
+{
+ spin_unlock(&ocelot->inj_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_unlock_inj_grp);
+
+void ocelot_lock_xtr_grp(struct ocelot *ocelot, int grp)
+ __acquires(&ocelot->inj_lock)
+{
+ spin_lock(&ocelot->inj_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_lock_xtr_grp);
+
+void ocelot_unlock_xtr_grp(struct ocelot *ocelot, int grp)
+ __releases(&ocelot->inj_lock)
+{
+ spin_unlock(&ocelot->inj_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_unlock_xtr_grp);
+
+void ocelot_lock_xtr_grp_bh(struct ocelot *ocelot, int grp)
+ __acquires(&ocelot->xtr_lock)
+{
+ spin_lock_bh(&ocelot->xtr_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_lock_xtr_grp_bh);
+
+void ocelot_unlock_xtr_grp_bh(struct ocelot *ocelot, int grp)
+ __releases(&ocelot->xtr_lock)
+{
+ spin_unlock_bh(&ocelot->xtr_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_unlock_xtr_grp_bh);
+
int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
{
u64 timestamp, src_port, len;
@@ -1109,6 +1317,8 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
u32 val, *buf;
int err;
+ lockdep_assert_held(&ocelot->xtr_lock);
+
err = ocelot_xtr_poll_xfh(ocelot, grp, xfh);
if (err)
return err;
@@ -1184,6 +1394,8 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp)
{
u32 val = ocelot_read(ocelot, QS_INJ_STATUS);
+ lockdep_assert_held(&ocelot->inj_lock);
+
if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))))
return false;
if (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp)))
@@ -1193,28 +1405,55 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp)
}
EXPORT_SYMBOL(ocelot_can_inject);
-void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag)
+/**
+ * ocelot_ifh_set_basic - Set basic information in Injection Frame Header
+ * @ifh: Pointer to Injection Frame Header memory
+ * @ocelot: Switch private data structure
+ * @port: Egress port number
+ * @rew_op: Egress rewriter operation for PTP
+ * @skb: Pointer to socket buffer (packet)
+ *
+ * Populate the Injection Frame Header with basic information for this skb: the
+ * analyzer bypass bit, destination port, VLAN info, egress rewriter info.
+ */
+void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port,
+ u32 rew_op, struct sk_buff *skb)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct net_device *dev = skb->dev;
+ u64 vlan_tci, tag_type;
+ int qos_class;
+
+ ocelot_xmit_get_vlan_info(skb, ocelot_port->bridge, &vlan_tci,
+ &tag_type);
+
+ qos_class = netdev_get_num_tc(dev) ?
+ netdev_get_prio_tc_map(dev, skb->priority) : skb->priority;
+
+ memset(ifh, 0, OCELOT_TAG_LEN);
ocelot_ifh_set_bypass(ifh, 1);
+ ocelot_ifh_set_src(ifh, BIT_ULL(ocelot->num_phys_ports));
ocelot_ifh_set_dest(ifh, BIT_ULL(port));
- ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
- if (vlan_tag)
- ocelot_ifh_set_vlan_tci(ifh, vlan_tag);
+ ocelot_ifh_set_qos_class(ifh, qos_class);
+ ocelot_ifh_set_tag_type(ifh, tag_type);
+ ocelot_ifh_set_vlan_tci(ifh, vlan_tci);
if (rew_op)
ocelot_ifh_set_rew_op(ifh, rew_op);
}
-EXPORT_SYMBOL(ocelot_ifh_port_set);
+EXPORT_SYMBOL(ocelot_ifh_set_basic);
void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
u32 rew_op, struct sk_buff *skb)
{
- u32 ifh[OCELOT_TAG_LEN / 4] = {0};
+ u32 ifh[OCELOT_TAG_LEN / 4];
unsigned int i, count, last;
+ lockdep_assert_held(&ocelot->inj_lock);
+
ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
- ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
+ ocelot_ifh_set_basic(ifh, ocelot, port, rew_op, skb);
for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
@@ -1247,6 +1486,8 @@ EXPORT_SYMBOL(ocelot_port_inject_frame);
void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
{
+ lockdep_assert_held(&ocelot->xtr_lock);
+
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
ocelot_read_rix(ocelot, QS_XTR_RD, grp);
}
@@ -2532,7 +2773,7 @@ int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio)
ANA_PORT_QOS_CFG,
port);
- return 0;
+ return ocelot_update_vlan_reclassify_rule(ocelot, port);
}
EXPORT_SYMBOL_GPL(ocelot_port_set_default_prio);
@@ -2929,6 +3170,8 @@ int ocelot_init(struct ocelot *ocelot)
mutex_init(&ocelot->fwd_domain_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
spin_lock_init(&ocelot->ts_id_lock);
+ spin_lock_init(&ocelot->inj_lock);
+ spin_lock_init(&ocelot->xtr_lock);
ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0);
if (!ocelot->owq)
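For context, the lockdep_assert_held() annotations above imply a calling convention for the new helpers; a hedged sketch of how a transmit path is expected to use them (error and fallback handling elided, names taken from this file):

static void example_inject(struct ocelot *ocelot, int port, int grp,
			   u32 rew_op, struct sk_buff *skb)
{
	ocelot_lock_inj_grp(ocelot, grp);

	/* ocelot_can_inject() and ocelot_port_inject_frame() both assert
	 * that inj_lock is held, so the whole sequence sits inside it.
	 */
	if (ocelot_can_inject(ocelot, grp))
		ocelot_port_inject_frame(ocelot, port, grp, rew_op, skb);

	ocelot_unlock_inj_grp(ocelot, grp);
}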
diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c
index 312a46832154..00326ae8c708 100644
--- a/drivers/net/ethernet/mscc/ocelot_fdma.c
+++ b/drivers/net/ethernet/mscc/ocelot_fdma.c
@@ -665,8 +665,7 @@ static int ocelot_fdma_prepare_skb(struct ocelot *ocelot, int port, u32 rew_op,
ifh = skb_push(skb, OCELOT_TAG_LEN);
skb_put(skb, ETH_FCS_LEN);
- memset(ifh, 0, OCELOT_TAG_LEN);
- ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
+ ocelot_ifh_set_basic(ifh, ocelot, port, rew_op, skb);
return 0;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index b3c28260adf8..e172638b0601 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -582,17 +582,13 @@ EXPORT_SYMBOL(ocelot_hwstamp_set);
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
struct kernel_ethtool_ts_info *info)
{
- info->phc_index = ocelot->ptp_clock ?
- ptp_clock_index(ocelot->ptp_clock) : -1;
- if (info->phc_index == -1) {
- info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ if (ocelot->ptp_clock) {
+ info->phc_index = ptp_clock_index(ocelot->ptp_clock);
+ } else {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index 73cdec5ca6a3..5734b86aed5b 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -695,6 +695,7 @@ static void is1_entry_set(struct ocelot *ocelot, int ix,
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_MC, filter->dmac_mc);
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_BC, filter->dmac_bc);
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_VLAN_TAGGED, tag->tagged);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TPID, tag->tpid);
vcap_key_set(vcap, &data, VCAP_IS1_HK_VID,
tag->vid.value, tag->vid.mask);
vcap_key_set(vcap, &data, VCAP_IS1_HK_PCP,
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 993212c3a7da..c09dd2e3343c 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -51,6 +51,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
struct ocelot *ocelot = arg;
int grp = 0, err;
+ ocelot_lock_xtr_grp(ocelot, grp);
+
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
@@ -69,6 +71,8 @@ out:
if (err < 0)
ocelot_drain_cpu_queue(ocelot, 0);
+ ocelot_unlock_xtr_grp(ocelot, grp);
+
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index df2ab5cbd49b..3a02eef58cc6 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -4537,8 +4537,8 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
u64 *prog;
int err;
- prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
- GFP_KERNEL);
+ prog = kmemdup_array(nfp_prog->prog, nfp_prog->prog_len, sizeof(u64),
+ GFP_KERNEL);
if (!prog)
return ERR_PTR(-ENOMEM);
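kmemdup_array(src, n, size, gfp) duplicates n * size bytes like kmemdup(), but with the multiplication overflow-checked, which is why it is preferred for array copies like this one. A userspace model of the semantics:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static void *kmemdup_array_model(const void *src, size_t n, size_t size)
{
	void *p;

	if (size && n > SIZE_MAX / size)	/* n * size would overflow */
		return NULL;
	p = malloc(n * size);
	if (p)
		memcpy(p, src, n * size);
	return p;
}

int main(void)
{
	uint64_t prog[4] = { 1, 2, 3, 4 };
	void *copy = kmemdup_array_model(prog, 4, sizeof(uint64_t));

	free(copy);
	return 0;
}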
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
index cc54faca2283..515069d5637b 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -6,7 +6,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/ktime.h>
#include <net/xfrm.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 182ba0a8b095..6e0929af0f72 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -821,14 +821,13 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
snprintf(r_vec->name, sizeof(r_vec->name),
"%s-rxtx-%d", nfp_net_name(nn), idx);
- err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
- r_vec);
+ err = request_irq(r_vec->irq_vector, r_vec->handler, IRQF_NO_AUTOEN,
+ r_vec->name, r_vec);
if (err) {
nfp_net_napi_del(&nn->dp, r_vec);
nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
return err;
}
- disable_irq(r_vec->irq_vector);
irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
index 2dd37557185e..7276e44a21d0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
@@ -41,6 +41,8 @@ struct nfp_dump_tl {
);
char data[];
};
+static_assert(offsetof(struct nfp_dump_tl, data) == sizeof(struct nfp_dump_tl_hdr),
+ "struct member likely outside of struct_group_tagged()");
/* NFP CPP parameters */
struct nfp_dumpspec_cpp_isl_id {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index eee0bfc41074..227e7a5d712e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -248,7 +248,6 @@ nfp_repr_fix_features(struct net_device *netdev, netdev_features_t features)
features = netdev_intersect_features(features, lower_features);
features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_HW_TC);
- features |= NETIF_F_LLTX;
return features;
}
@@ -386,7 +385,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
- netdev->features |= NETIF_F_LLTX;
+ netdev->lltx = true;
if (nfp_app_has_tc(app)) {
netdev->features |= NETIF_F_HW_TC;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 3f10c5365c80..7c2200b49ce4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -15,7 +15,7 @@
* abstraction builds upon this BAR interface.
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kref.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index a8286d0032d1..669f9f8fb507 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -9,7 +9,7 @@
* Rolf Neugebauer <rolf.neugebauer@netronome.com>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/ioport.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
index 508ae6b571ca..addf02c63b1a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
@@ -9,7 +9,7 @@
* Rolf Neugebauer <rolf.neugebauer@netronome.com>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
index f05dd34ab89f..cfa4db5d3f85 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
@@ -15,7 +15,7 @@
*/
#include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 7136bc48530b..0bd6477292a6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -7,7 +7,7 @@
* Jason McMullan <jason.mcmullan@netronome.com>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/firmware.h>
@@ -278,7 +278,7 @@ struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp)
res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP);
if (IS_ERR(res))
- return (void *)res;
+ return ERR_CAST(res);
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
index 2260c2403a83..68862ac062d2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
@@ -10,7 +10,7 @@
* Francois H. Theron <francois.theron@netronome.com>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/oa_tc6.c b/drivers/net/ethernet/oa_tc6.c
new file mode 100644
index 000000000000..f9c0dcd965c2
--- /dev/null
+++ b/drivers/net/ethernet/oa_tc6.c
@@ -0,0 +1,1361 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface framework
+ *
+ * Author: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/oa_tc6.h>
+
+/* OPEN Alliance TC6 registers */
+/* Standard Capabilities Register */
+#define OA_TC6_REG_STDCAP 0x0002
+#define STDCAP_DIRECT_PHY_REG_ACCESS BIT(8)
+
+/* Reset Control and Status Register */
+#define OA_TC6_REG_RESET 0x0003
+#define RESET_SWRESET BIT(0) /* Software Reset */
+
+/* Configuration Register #0 */
+#define OA_TC6_REG_CONFIG0 0x0004
+#define CONFIG0_SYNC BIT(15)
+#define CONFIG0_ZARFE_ENABLE BIT(12)
+
+/* Status Register #0 */
+#define OA_TC6_REG_STATUS0 0x0008
+#define STATUS0_RESETC BIT(6) /* Reset Complete */
+#define STATUS0_HEADER_ERROR BIT(5)
+#define STATUS0_LOSS_OF_FRAME_ERROR BIT(4)
+#define STATUS0_RX_BUFFER_OVERFLOW_ERROR BIT(3)
+#define STATUS0_TX_PROTOCOL_ERROR BIT(0)
+
+/* Buffer Status Register */
+#define OA_TC6_REG_BUFFER_STATUS 0x000B
+#define BUFFER_STATUS_TX_CREDITS_AVAILABLE GENMASK(15, 8)
+#define BUFFER_STATUS_RX_CHUNKS_AVAILABLE GENMASK(7, 0)
+
+/* Interrupt Mask Register #0 */
+#define OA_TC6_REG_INT_MASK0 0x000C
+#define INT_MASK0_HEADER_ERR_MASK BIT(5)
+#define INT_MASK0_LOSS_OF_FRAME_ERR_MASK BIT(4)
+#define INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK BIT(3)
+#define INT_MASK0_TX_PROTOCOL_ERR_MASK BIT(0)
+
+/* PHY Clause 22 registers base address and mask */
+#define OA_TC6_PHY_STD_REG_ADDR_BASE 0xFF00
+#define OA_TC6_PHY_STD_REG_ADDR_MASK 0x1F
+
+/* Control command header */
+#define OA_TC6_CTRL_HEADER_DATA_NOT_CTRL BIT(31)
+#define OA_TC6_CTRL_HEADER_WRITE_NOT_READ BIT(29)
+#define OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR GENMASK(27, 24)
+#define OA_TC6_CTRL_HEADER_ADDR GENMASK(23, 8)
+#define OA_TC6_CTRL_HEADER_LENGTH GENMASK(7, 1)
+#define OA_TC6_CTRL_HEADER_PARITY BIT(0)
+
+/* Data header */
+#define OA_TC6_DATA_HEADER_DATA_NOT_CTRL BIT(31)
+#define OA_TC6_DATA_HEADER_DATA_VALID BIT(21)
+#define OA_TC6_DATA_HEADER_START_VALID BIT(20)
+#define OA_TC6_DATA_HEADER_START_WORD_OFFSET GENMASK(19, 16)
+#define OA_TC6_DATA_HEADER_END_VALID BIT(14)
+#define OA_TC6_DATA_HEADER_END_BYTE_OFFSET GENMASK(13, 8)
+#define OA_TC6_DATA_HEADER_PARITY BIT(0)
+
+/* Data footer */
+#define OA_TC6_DATA_FOOTER_EXTENDED_STS BIT(31)
+#define OA_TC6_DATA_FOOTER_RXD_HEADER_BAD BIT(30)
+#define OA_TC6_DATA_FOOTER_CONFIG_SYNC BIT(29)
+#define OA_TC6_DATA_FOOTER_RX_CHUNKS GENMASK(28, 24)
+#define OA_TC6_DATA_FOOTER_DATA_VALID BIT(21)
+#define OA_TC6_DATA_FOOTER_START_VALID BIT(20)
+#define OA_TC6_DATA_FOOTER_START_WORD_OFFSET GENMASK(19, 16)
+#define OA_TC6_DATA_FOOTER_END_VALID BIT(14)
+#define OA_TC6_DATA_FOOTER_END_BYTE_OFFSET GENMASK(13, 8)
+#define OA_TC6_DATA_FOOTER_TX_CREDITS GENMASK(5, 1)
+
+/* PHY - Clause 45 registers memory map selector (MMS) as per table 6 in the
+ * OPEN Alliance specification.
+ */
+#define OA_TC6_PHY_C45_PCS_MMS2 2 /* MMD 3 */
+#define OA_TC6_PHY_C45_PMA_PMD_MMS3 3 /* MMD 1 */
+#define OA_TC6_PHY_C45_VS_PLCA_MMS4 4 /* MMD 31 */
+#define OA_TC6_PHY_C45_AUTO_NEG_MMS5 5 /* MMD 7 */
+#define OA_TC6_PHY_C45_POWER_UNIT_MMS6 6 /* MMD 13 */
+
+#define OA_TC6_CTRL_HEADER_SIZE 4
+#define OA_TC6_CTRL_REG_VALUE_SIZE 4
+#define OA_TC6_CTRL_IGNORED_SIZE 4
+#define OA_TC6_CTRL_MAX_REGISTERS 128
+#define OA_TC6_CTRL_SPI_BUF_SIZE (OA_TC6_CTRL_HEADER_SIZE +\
+ (OA_TC6_CTRL_MAX_REGISTERS *\
+ OA_TC6_CTRL_REG_VALUE_SIZE) +\
+ OA_TC6_CTRL_IGNORED_SIZE)
+#define OA_TC6_CHUNK_PAYLOAD_SIZE 64
+#define OA_TC6_DATA_HEADER_SIZE 4
+#define OA_TC6_CHUNK_SIZE (OA_TC6_DATA_HEADER_SIZE +\
+ OA_TC6_CHUNK_PAYLOAD_SIZE)
+#define OA_TC6_MAX_TX_CHUNKS 48
+#define OA_TC6_SPI_DATA_BUF_SIZE (OA_TC6_MAX_TX_CHUNKS *\
+ OA_TC6_CHUNK_SIZE)
+#define STATUS0_RESETC_POLL_DELAY 1000
+#define STATUS0_RESETC_POLL_TIMEOUT 1000000
+
+/* Internal structure for MAC-PHY drivers */
+struct oa_tc6 {
+ struct device *dev;
+ struct net_device *netdev;
+ struct phy_device *phydev;
+ struct mii_bus *mdiobus;
+ struct spi_device *spi;
+ struct mutex spi_ctrl_lock; /* Protects spi control transfer */
+ void *spi_ctrl_tx_buf;
+ void *spi_ctrl_rx_buf;
+ void *spi_data_tx_buf;
+ void *spi_data_rx_buf;
+ struct sk_buff *ongoing_tx_skb;
+ struct sk_buff *waiting_tx_skb;
+ struct sk_buff *rx_skb;
+ struct task_struct *spi_thread;
+ wait_queue_head_t spi_wq;
+ u16 tx_skb_offset;
+ u16 spi_data_tx_buf_offset;
+ u16 tx_credits;
+ u8 rx_chunks_available;
+ bool rx_buf_overflow;
+ bool int_flag;
+};
+
+enum oa_tc6_header_type {
+ OA_TC6_CTRL_HEADER,
+ OA_TC6_DATA_HEADER,
+};
+
+enum oa_tc6_register_op {
+ OA_TC6_CTRL_REG_READ = 0,
+ OA_TC6_CTRL_REG_WRITE = 1,
+};
+
+enum oa_tc6_data_valid_info {
+ OA_TC6_DATA_INVALID,
+ OA_TC6_DATA_VALID,
+};
+
+enum oa_tc6_data_start_valid_info {
+ OA_TC6_DATA_START_INVALID,
+ OA_TC6_DATA_START_VALID,
+};
+
+enum oa_tc6_data_end_valid_info {
+ OA_TC6_DATA_END_INVALID,
+ OA_TC6_DATA_END_VALID,
+};
+
+static int oa_tc6_spi_transfer(struct oa_tc6 *tc6,
+ enum oa_tc6_header_type header_type, u16 length)
+{
+ struct spi_transfer xfer = { 0 };
+ struct spi_message msg;
+
+ if (header_type == OA_TC6_DATA_HEADER) {
+ xfer.tx_buf = tc6->spi_data_tx_buf;
+ xfer.rx_buf = tc6->spi_data_rx_buf;
+ } else {
+ xfer.tx_buf = tc6->spi_ctrl_tx_buf;
+ xfer.rx_buf = tc6->spi_ctrl_rx_buf;
+ }
+ xfer.len = length;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(tc6->spi, &msg);
+}
+
+static int oa_tc6_get_parity(u32 p)
+{
+ /* Public domain code snippet, lifted from
+ * http://www-graphics.stanford.edu/~seander/bithacks.html
+ */
+ p ^= p >> 1;
+ p ^= p >> 2;
+ p = (p & 0x11111111U) * 0x11111111U;
+
+ /* Odd parity is used here */
+ return !((p >> 28) & 1);
+}
+
+static __be32 oa_tc6_prepare_ctrl_header(u32 addr, u8 length,
+ enum oa_tc6_register_op reg_op)
+{
+ u32 header;
+
+ header = FIELD_PREP(OA_TC6_CTRL_HEADER_DATA_NOT_CTRL,
+ OA_TC6_CTRL_HEADER) |
+ FIELD_PREP(OA_TC6_CTRL_HEADER_WRITE_NOT_READ, reg_op) |
+ FIELD_PREP(OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR, addr >> 16) |
+ FIELD_PREP(OA_TC6_CTRL_HEADER_ADDR, addr) |
+ FIELD_PREP(OA_TC6_CTRL_HEADER_LENGTH, length - 1);
+ header |= FIELD_PREP(OA_TC6_CTRL_HEADER_PARITY,
+ oa_tc6_get_parity(header));
+
+ return cpu_to_be32(header);
+}
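/* Worked example (illustrative, not part of the driver): encoding a
 * control read of one register at address 0x0008 (OA_TC6_REG_STATUS0).
 * DNC=0 (control), WNR=0 (read), MMS = addr >> 16 = 0, ADDR = 0x0008,
 * LEN = length - 1 = 0, giving 0x00000800 before parity. That value has
 * a single set bit (odd population count), so oa_tc6_get_parity()
 * returns 0 and the parity bit stays clear, preserving overall odd
 * parity on the wire; the final header is cpu_to_be32(0x00000800).
 */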
+
+static void oa_tc6_update_ctrl_write_data(struct oa_tc6 *tc6, u32 value[],
+ u8 length)
+{
+ __be32 *tx_buf = tc6->spi_ctrl_tx_buf + OA_TC6_CTRL_HEADER_SIZE;
+
+ for (int i = 0; i < length; i++)
+ *tx_buf++ = cpu_to_be32(value[i]);
+}
+
+static u16 oa_tc6_calculate_ctrl_buf_size(u8 length)
+{
+ /* A control command consists of a 4-byte header + a 4-byte register
+ * value for each register + 4 ignored bytes.
+ */
+ return OA_TC6_CTRL_HEADER_SIZE + OA_TC6_CTRL_REG_VALUE_SIZE * length +
+ OA_TC6_CTRL_IGNORED_SIZE;
+}
+
+static void oa_tc6_prepare_ctrl_spi_buf(struct oa_tc6 *tc6, u32 address,
+ u32 value[], u8 length,
+ enum oa_tc6_register_op reg_op)
+{
+ __be32 *tx_buf = tc6->spi_ctrl_tx_buf;
+
+ *tx_buf = oa_tc6_prepare_ctrl_header(address, length, reg_op);
+
+ if (reg_op == OA_TC6_CTRL_REG_WRITE)
+ oa_tc6_update_ctrl_write_data(tc6, value, length);
+}
+
+static int oa_tc6_check_ctrl_write_reply(struct oa_tc6 *tc6, u8 size)
+{
+ u8 *tx_buf = tc6->spi_ctrl_tx_buf;
+ u8 *rx_buf = tc6->spi_ctrl_rx_buf;
+
+ rx_buf += OA_TC6_CTRL_IGNORED_SIZE;
+
+ /* The echoed control write must match with the one that was
+ * transmitted.
+ */
+ if (memcmp(tx_buf, rx_buf, size - OA_TC6_CTRL_IGNORED_SIZE))
+ return -EPROTO;
+
+ return 0;
+}
+
+static int oa_tc6_check_ctrl_read_reply(struct oa_tc6 *tc6, u8 size)
+{
+ u32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE;
+ u32 *tx_buf = tc6->spi_ctrl_tx_buf;
+
+ /* The echoed control read header must match with the one that was
+ * transmitted.
+ */
+ if (*tx_buf != *rx_buf)
+ return -EPROTO;
+
+ return 0;
+}
+
+static void oa_tc6_copy_ctrl_read_data(struct oa_tc6 *tc6, u32 value[],
+ u8 length)
+{
+ __be32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE +
+ OA_TC6_CTRL_HEADER_SIZE;
+
+ for (int i = 0; i < length; i++)
+ value[i] = be32_to_cpu(*rx_buf++);
+}
+
+static int oa_tc6_perform_ctrl(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length, enum oa_tc6_register_op reg_op)
+{
+ u16 size;
+ int ret;
+
+ /* Prepare control command and copy to SPI control buffer */
+ oa_tc6_prepare_ctrl_spi_buf(tc6, address, value, length, reg_op);
+
+ size = oa_tc6_calculate_ctrl_buf_size(length);
+
+ /* Perform SPI transfer */
+ ret = oa_tc6_spi_transfer(tc6, OA_TC6_CTRL_HEADER, size);
+ if (ret) {
+ dev_err(&tc6->spi->dev, "SPI transfer failed for control: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Check echoed/received control write command reply for errors */
+ if (reg_op == OA_TC6_CTRL_REG_WRITE)
+ return oa_tc6_check_ctrl_write_reply(tc6, size);
+
+ /* Check echoed/received control read command reply for errors */
+ ret = oa_tc6_check_ctrl_read_reply(tc6, size);
+ if (ret)
+ return ret;
+
+ oa_tc6_copy_ctrl_read_data(tc6, value, length);
+
+ return 0;
+}
+
+/**
+ * oa_tc6_read_registers - function for reading multiple consecutive registers.
+ * @tc6: oa_tc6 struct.
+ * @address: address of the first register to be read in the MAC-PHY.
+ * @value: values to be read from the starting register address @address.
+ * @length: number of consecutive registers to be read from @address.
+ *
+ * Maximum of 128 consecutive registers can be read starting at @address.
+ *
+ * Return: 0 on success otherwise failed.
+ */
+int oa_tc6_read_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length)
+{
+ int ret;
+
+ if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) {
+ dev_err(&tc6->spi->dev, "Invalid register length parameter\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&tc6->spi_ctrl_lock);
+ ret = oa_tc6_perform_ctrl(tc6, address, value, length,
+ OA_TC6_CTRL_REG_READ);
+ mutex_unlock(&tc6->spi_ctrl_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(oa_tc6_read_registers);
+
+/**
+ * oa_tc6_read_register - function for reading a MAC-PHY register.
+ * @tc6: oa_tc6 struct.
+ * @address: register address of the MAC-PHY to be read.
+ * @value: value read from the @address register address of the MAC-PHY.
+ *
+ * Return: 0 on success otherwise failed.
+ */
+int oa_tc6_read_register(struct oa_tc6 *tc6, u32 address, u32 *value)
+{
+ return oa_tc6_read_registers(tc6, address, value, 1);
+}
+EXPORT_SYMBOL_GPL(oa_tc6_read_register);
+
+/**
+ * oa_tc6_write_registers - function for writing multiple consecutive registers.
+ * @tc6: oa_tc6 struct.
+ * @address: address of the first register to be written in the MAC-PHY.
+ * @value: values to be written from the starting register address @address.
+ * @length: number of consecutive registers to be written from @address.
+ *
+ * Maximum of 128 consecutive registers can be written starting at @address.
+ *
+ * Return: 0 on success otherwise failed.
+ */
+int oa_tc6_write_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length)
+{
+ int ret;
+
+ if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) {
+ dev_err(&tc6->spi->dev, "Invalid register length parameter\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&tc6->spi_ctrl_lock);
+ ret = oa_tc6_perform_ctrl(tc6, address, value, length,
+ OA_TC6_CTRL_REG_WRITE);
+ mutex_unlock(&tc6->spi_ctrl_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(oa_tc6_write_registers);
+
+/**
+ * oa_tc6_write_register - function for writing a MAC-PHY register.
+ * @tc6: oa_tc6 struct.
+ * @address: register address of the MAC-PHY to be written.
+ * @value: value to be written in the @address register address of the MAC-PHY.
+ *
+ * Return: 0 on success otherwise failed.
+ */
+int oa_tc6_write_register(struct oa_tc6 *tc6, u32 address, u32 value)
+{
+ return oa_tc6_write_registers(tc6, address, &value, 1);
+}
+EXPORT_SYMBOL_GPL(oa_tc6_write_register);
+
+static int oa_tc6_check_phy_reg_direct_access_capability(struct oa_tc6 *tc6)
+{
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_STDCAP, &regval);
+ if (ret)
+ return ret;
+
+ if (!(regval & STDCAP_DIRECT_PHY_REG_ACCESS))
+ return -ENODEV;
+
+ return 0;
+}
+
+static void oa_tc6_handle_link_change(struct net_device *netdev)
+{
+ phy_print_status(netdev->phydev);
+}
+
+static int oa_tc6_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct oa_tc6 *tc6 = bus->priv;
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE |
+ (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK),
+ &regval);
+ if (ret)
+ return ret;
+
+ return regval;
+}
+
+static int oa_tc6_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct oa_tc6 *tc6 = bus->priv;
+
+ return oa_tc6_write_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE |
+ (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK),
+ val);
+}
+
+static int oa_tc6_get_phy_c45_mms(int devnum)
+{
+ switch (devnum) {
+ case MDIO_MMD_PCS:
+ return OA_TC6_PHY_C45_PCS_MMS2;
+ case MDIO_MMD_PMAPMD:
+ return OA_TC6_PHY_C45_PMA_PMD_MMS3;
+ case MDIO_MMD_VEND2:
+ return OA_TC6_PHY_C45_VS_PLCA_MMS4;
+ case MDIO_MMD_AN:
+ return OA_TC6_PHY_C45_AUTO_NEG_MMS5;
+ case MDIO_MMD_POWER_UNIT:
+ return OA_TC6_PHY_C45_POWER_UNIT_MMS6;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
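/* Illustrative mapping (values from the switch above): a Clause 45
 * access to MMD 7 (MDIO_MMD_AN), register 0x0200, is issued against
 * MMS 5, i.e. control address
 * (OA_TC6_PHY_C45_AUTO_NEG_MMS5 << 16) | 0x0200 = 0x00050200,
 * which is exactly what the read/write_c45 handlers below compute.
 */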
+
+static int oa_tc6_mdiobus_read_c45(struct mii_bus *bus, int addr, int devnum,
+ int regnum)
+{
+ struct oa_tc6 *tc6 = bus->priv;
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_get_phy_c45_mms(devnum);
+ if (ret < 0)
+ return ret;
+
+ ret = oa_tc6_read_register(tc6, (ret << 16) | regnum, &regval);
+ if (ret)
+ return ret;
+
+ return regval;
+}
+
+static int oa_tc6_mdiobus_write_c45(struct mii_bus *bus, int addr, int devnum,
+ int regnum, u16 val)
+{
+ struct oa_tc6 *tc6 = bus->priv;
+ int ret;
+
+ ret = oa_tc6_get_phy_c45_mms(devnum);
+ if (ret < 0)
+ return ret;
+
+ return oa_tc6_write_register(tc6, (ret << 16) | regnum, val);
+}
+
+static int oa_tc6_mdiobus_register(struct oa_tc6 *tc6)
+{
+ int ret;
+
+ tc6->mdiobus = mdiobus_alloc();
+ if (!tc6->mdiobus) {
+ netdev_err(tc6->netdev, "MDIO bus alloc failed\n");
+ return -ENOMEM;
+ }
+
+ tc6->mdiobus->priv = tc6;
+ tc6->mdiobus->read = oa_tc6_mdiobus_read;
+ tc6->mdiobus->write = oa_tc6_mdiobus_write;
+ /* OPEN Alliance 10BASE-T1x compliant MAC-PHYs have both C22 and C45
+ * register spaces. If the PHY is discovered via the C22 bus protocol,
+ * it is assumed to use the C22 protocol, and C45 registers are always
+ * accessed indirectly through C22 registers, because there is no clean
+ * separation between the C22/C45 register spaces and the C22/C45 MDIO
+ * bus protocols. As a result, direct access to PHY C45 registers,
+ * which would save multiple SPI bus accesses, can't be used. To support
+ * it, PHY drivers can set .read_mmd/.write_mmd to call
+ * .read_c45/.write_c45. Ex: drivers/net/phy/microchip_t1s.c
+ */
+ tc6->mdiobus->read_c45 = oa_tc6_mdiobus_read_c45;
+ tc6->mdiobus->write_c45 = oa_tc6_mdiobus_write_c45;
+ tc6->mdiobus->name = "oa-tc6-mdiobus";
+ tc6->mdiobus->parent = tc6->dev;
+
+ snprintf(tc6->mdiobus->id, ARRAY_SIZE(tc6->mdiobus->id), "%s",
+ dev_name(&tc6->spi->dev));
+
+ ret = mdiobus_register(tc6->mdiobus);
+ if (ret) {
+ netdev_err(tc6->netdev, "Could not register MDIO bus\n");
+ mdiobus_free(tc6->mdiobus);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void oa_tc6_mdiobus_unregister(struct oa_tc6 *tc6)
+{
+ mdiobus_unregister(tc6->mdiobus);
+ mdiobus_free(tc6->mdiobus);
+}
+
+static int oa_tc6_phy_init(struct oa_tc6 *tc6)
+{
+ int ret;
+
+ ret = oa_tc6_check_phy_reg_direct_access_capability(tc6);
+ if (ret) {
+ netdev_err(tc6->netdev,
+ "Direct PHY register access is not supported by the MAC-PHY\n");
+ return ret;
+ }
+
+ ret = oa_tc6_mdiobus_register(tc6);
+ if (ret)
+ return ret;
+
+ tc6->phydev = phy_find_first(tc6->mdiobus);
+ if (!tc6->phydev) {
+ netdev_err(tc6->netdev, "No PHY found\n");
+ oa_tc6_mdiobus_unregister(tc6);
+ return -ENODEV;
+ }
+
+ tc6->phydev->is_internal = true;
+ ret = phy_connect_direct(tc6->netdev, tc6->phydev,
+ &oa_tc6_handle_link_change,
+ PHY_INTERFACE_MODE_INTERNAL);
+ if (ret) {
+ netdev_err(tc6->netdev, "Can't attach PHY to %s\n",
+ tc6->mdiobus->id);
+ oa_tc6_mdiobus_unregister(tc6);
+ return ret;
+ }
+
+ phy_attached_info(tc6->netdev->phydev);
+
+ return 0;
+}
+
+static void oa_tc6_phy_exit(struct oa_tc6 *tc6)
+{
+ phy_disconnect(tc6->phydev);
+ oa_tc6_mdiobus_unregister(tc6);
+}
+
+static int oa_tc6_read_status0(struct oa_tc6 *tc6)
+{
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, &regval);
+ if (ret) {
+ dev_err(&tc6->spi->dev, "STATUS0 register read failed: %d\n",
+ ret);
+ return 0;
+ }
+
+ return regval;
+}
+
+static int oa_tc6_sw_reset_macphy(struct oa_tc6 *tc6)
+{
+ u32 regval = RESET_SWRESET;
+ int ret;
+
+ ret = oa_tc6_write_register(tc6, OA_TC6_REG_RESET, regval);
+ if (ret)
+ return ret;
+
+ /* Poll every 1 ms for soft reset completion, up to a 1 s timeout */
+ ret = readx_poll_timeout(oa_tc6_read_status0, tc6, regval,
+ regval & STATUS0_RESETC,
+ STATUS0_RESETC_POLL_DELAY,
+ STATUS0_RESETC_POLL_TIMEOUT);
+ if (ret)
+ return -ENODEV;
+
+ /* Clear the reset complete status */
+ return oa_tc6_write_register(tc6, OA_TC6_REG_STATUS0, regval);
+}
+
+static int oa_tc6_unmask_macphy_error_interrupts(struct oa_tc6 *tc6)
+{
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_INT_MASK0, &regval);
+ if (ret)
+ return ret;
+
+ regval &= ~(INT_MASK0_TX_PROTOCOL_ERR_MASK |
+ INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK |
+ INT_MASK0_LOSS_OF_FRAME_ERR_MASK |
+ INT_MASK0_HEADER_ERR_MASK);
+
+ return oa_tc6_write_register(tc6, OA_TC6_REG_INT_MASK0, regval);
+}
+
+static int oa_tc6_enable_data_transfer(struct oa_tc6 *tc6)
+{
+ u32 value;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &value);
+ if (ret)
+ return ret;
+
+ /* Enable configuration synchronization for data transfer */
+ value |= CONFIG0_SYNC;
+
+ return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, value);
+}
+
+static void oa_tc6_cleanup_ongoing_rx_skb(struct oa_tc6 *tc6)
+{
+ if (tc6->rx_skb) {
+ tc6->netdev->stats.rx_dropped++;
+ kfree_skb(tc6->rx_skb);
+ tc6->rx_skb = NULL;
+ }
+}
+
+static void oa_tc6_cleanup_ongoing_tx_skb(struct oa_tc6 *tc6)
+{
+ if (tc6->ongoing_tx_skb) {
+ tc6->netdev->stats.tx_dropped++;
+ kfree_skb(tc6->ongoing_tx_skb);
+ tc6->ongoing_tx_skb = NULL;
+ }
+}
+
+static int oa_tc6_process_extended_status(struct oa_tc6 *tc6)
+{
+ u32 value;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, &value);
+ if (ret) {
+ netdev_err(tc6->netdev, "STATUS0 register read failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Clear the error interrupts status */
+ ret = oa_tc6_write_register(tc6, OA_TC6_REG_STATUS0, value);
+ if (ret) {
+ netdev_err(tc6->netdev, "STATUS0 register write failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (FIELD_GET(STATUS0_RX_BUFFER_OVERFLOW_ERROR, value)) {
+ tc6->rx_buf_overflow = true;
+ oa_tc6_cleanup_ongoing_rx_skb(tc6);
+ net_err_ratelimited("%s: Receive buffer overflow error\n",
+ tc6->netdev->name);
+ return -EAGAIN;
+ }
+ if (FIELD_GET(STATUS0_TX_PROTOCOL_ERROR, value)) {
+ netdev_err(tc6->netdev, "Transmit protocol error\n");
+ return -ENODEV;
+ }
+ /* TODO: Currently loss of frame and header errors are treated as
+ * non-recoverable errors. They will be handled in the next version.
+ */
+ if (FIELD_GET(STATUS0_LOSS_OF_FRAME_ERROR, value)) {
+ netdev_err(tc6->netdev, "Loss of frame error\n");
+ return -ENODEV;
+ }
+ if (FIELD_GET(STATUS0_HEADER_ERROR, value)) {
+ netdev_err(tc6->netdev, "Header error\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int oa_tc6_process_rx_chunk_footer(struct oa_tc6 *tc6, u32 footer)
+{
+	/* Process the rx chunk footer for the following:
+	 * 1. tx credits
+	 * 2. errors, if any, reported by the MAC-PHY
+	 * 3. receive chunks available
+	 */
+ tc6->tx_credits = FIELD_GET(OA_TC6_DATA_FOOTER_TX_CREDITS, footer);
+ tc6->rx_chunks_available = FIELD_GET(OA_TC6_DATA_FOOTER_RX_CHUNKS,
+ footer);
+
+ if (FIELD_GET(OA_TC6_DATA_FOOTER_EXTENDED_STS, footer)) {
+ int ret = oa_tc6_process_extended_status(tc6);
+
+ if (ret)
+ return ret;
+ }
+
+ /* TODO: Currently received header bad and configuration unsync errors
+ * are treated as non-recoverable errors. They will be handled in the
+ * next version.
+ */
+ if (FIELD_GET(OA_TC6_DATA_FOOTER_RXD_HEADER_BAD, footer)) {
+ netdev_err(tc6->netdev, "Rxd header bad error\n");
+ return -ENODEV;
+ }
+
+ if (!FIELD_GET(OA_TC6_DATA_FOOTER_CONFIG_SYNC, footer)) {
+ netdev_err(tc6->netdev, "Config unsync error\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void oa_tc6_submit_rx_skb(struct oa_tc6 *tc6)
+{
+ tc6->rx_skb->protocol = eth_type_trans(tc6->rx_skb, tc6->netdev);
+ tc6->netdev->stats.rx_packets++;
+ tc6->netdev->stats.rx_bytes += tc6->rx_skb->len;
+
+ netif_rx(tc6->rx_skb);
+
+ tc6->rx_skb = NULL;
+}
+
+static void oa_tc6_update_rx_skb(struct oa_tc6 *tc6, u8 *payload, u8 length)
+{
+ memcpy(skb_put(tc6->rx_skb, length), payload, length);
+}
+
+static int oa_tc6_allocate_rx_skb(struct oa_tc6 *tc6)
+{
+ tc6->rx_skb = netdev_alloc_skb_ip_align(tc6->netdev, tc6->netdev->mtu +
+ ETH_HLEN + ETH_FCS_LEN);
+ if (!tc6->rx_skb) {
+ tc6->netdev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int oa_tc6_prcs_complete_rx_frame(struct oa_tc6 *tc6, u8 *payload,
+ u16 size)
+{
+ int ret;
+
+ ret = oa_tc6_allocate_rx_skb(tc6);
+ if (ret)
+ return ret;
+
+ oa_tc6_update_rx_skb(tc6, payload, size);
+
+ oa_tc6_submit_rx_skb(tc6);
+
+ return 0;
+}
+
+static int oa_tc6_prcs_rx_frame_start(struct oa_tc6 *tc6, u8 *payload, u16 size)
+{
+ int ret;
+
+ ret = oa_tc6_allocate_rx_skb(tc6);
+ if (ret)
+ return ret;
+
+ oa_tc6_update_rx_skb(tc6, payload, size);
+
+ return 0;
+}
+
+static void oa_tc6_prcs_rx_frame_end(struct oa_tc6 *tc6, u8 *payload, u16 size)
+{
+ oa_tc6_update_rx_skb(tc6, payload, size);
+
+ oa_tc6_submit_rx_skb(tc6);
+}
+
+static void oa_tc6_prcs_ongoing_rx_frame(struct oa_tc6 *tc6, u8 *payload,
+ u32 footer)
+{
+ oa_tc6_update_rx_skb(tc6, payload, OA_TC6_CHUNK_PAYLOAD_SIZE);
+}
+
+static int oa_tc6_prcs_rx_chunk_payload(struct oa_tc6 *tc6, u8 *data,
+ u32 footer)
+{
+ u8 start_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_START_WORD_OFFSET,
+ footer) * sizeof(u32);
+ u8 end_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_END_BYTE_OFFSET,
+ footer);
+ bool start_valid = FIELD_GET(OA_TC6_DATA_FOOTER_START_VALID, footer);
+ bool end_valid = FIELD_GET(OA_TC6_DATA_FOOTER_END_VALID, footer);
+ u16 size;
+
+	/* Start a new rx frame after an rx buffer overflow error */
+ if (start_valid && tc6->rx_buf_overflow)
+ tc6->rx_buf_overflow = false;
+
+ if (tc6->rx_buf_overflow)
+ return 0;
+
+ /* Process the chunk with complete rx frame */
+ if (start_valid && end_valid && start_byte_offset < end_byte_offset) {
+ size = end_byte_offset + 1 - start_byte_offset;
+ return oa_tc6_prcs_complete_rx_frame(tc6,
+ &data[start_byte_offset],
+ size);
+ }
+
+ /* Process the chunk with only rx frame start */
+ if (start_valid && !end_valid) {
+ size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
+ return oa_tc6_prcs_rx_frame_start(tc6,
+ &data[start_byte_offset],
+ size);
+ }
+
+ /* Process the chunk with only rx frame end */
+ if (end_valid && !start_valid) {
+ size = end_byte_offset + 1;
+ oa_tc6_prcs_rx_frame_end(tc6, data, size);
+ return 0;
+ }
+
+ /* Process the chunk with previous rx frame end and next rx frame
+ * start.
+ */
+ if (start_valid && end_valid && start_byte_offset > end_byte_offset) {
+		/* After an rx buffer overflow error, the end of a previously
+		 * incomplete rx frame may arrive together with the start
+		 * valid of a new rx frame.
+		 */
+ if (tc6->rx_skb) {
+ size = end_byte_offset + 1;
+ oa_tc6_prcs_rx_frame_end(tc6, data, size);
+ }
+ size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
+ return oa_tc6_prcs_rx_frame_start(tc6,
+ &data[start_byte_offset],
+ size);
+ }
+
+ /* Process the chunk with ongoing rx frame data */
+ oa_tc6_prcs_ongoing_rx_frame(tc6, data, footer);
+
+ return 0;
+}
+
+static u32 oa_tc6_get_rx_chunk_footer(struct oa_tc6 *tc6, u16 footer_offset)
+{
+ u8 *rx_buf = tc6->spi_data_rx_buf;
+ __be32 footer;
+
+ footer = *((__be32 *)&rx_buf[footer_offset]);
+
+ return be32_to_cpu(footer);
+}
+
+static int oa_tc6_process_spi_data_rx_buf(struct oa_tc6 *tc6, u16 length)
+{
+ u16 no_of_rx_chunks = length / OA_TC6_CHUNK_SIZE;
+ u32 footer;
+ int ret;
+
+ /* All the rx chunks in the receive SPI data buffer are examined here */
+ for (int i = 0; i < no_of_rx_chunks; i++) {
+		/* The last 4 bytes of each received chunk contain the footer */
+ footer = oa_tc6_get_rx_chunk_footer(tc6, i * OA_TC6_CHUNK_SIZE +
+ OA_TC6_CHUNK_PAYLOAD_SIZE);
+
+ ret = oa_tc6_process_rx_chunk_footer(tc6, footer);
+ if (ret)
+ return ret;
+
+		/* If the chunk contains valid data, process it to determine
+		 * the validity and the location of the received frame data.
+		 */
+ if (FIELD_GET(OA_TC6_DATA_FOOTER_DATA_VALID, footer)) {
+ u8 *payload = tc6->spi_data_rx_buf + i *
+ OA_TC6_CHUNK_SIZE;
+
+ ret = oa_tc6_prcs_rx_chunk_payload(tc6, payload,
+ footer);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static __be32 oa_tc6_prepare_data_header(bool data_valid, bool start_valid,
+ bool end_valid, u8 end_byte_offset)
+{
+ u32 header = FIELD_PREP(OA_TC6_DATA_HEADER_DATA_NOT_CTRL,
+ OA_TC6_DATA_HEADER) |
+ FIELD_PREP(OA_TC6_DATA_HEADER_DATA_VALID, data_valid) |
+ FIELD_PREP(OA_TC6_DATA_HEADER_START_VALID, start_valid) |
+ FIELD_PREP(OA_TC6_DATA_HEADER_END_VALID, end_valid) |
+ FIELD_PREP(OA_TC6_DATA_HEADER_END_BYTE_OFFSET,
+ end_byte_offset);
+
+ header |= FIELD_PREP(OA_TC6_DATA_HEADER_PARITY,
+ oa_tc6_get_parity(header));
+
+ return cpu_to_be32(header);
+}
+
+static void oa_tc6_add_tx_skb_to_spi_buf(struct oa_tc6 *tc6)
+{
+ enum oa_tc6_data_end_valid_info end_valid = OA_TC6_DATA_END_INVALID;
+ __be32 *tx_buf = tc6->spi_data_tx_buf + tc6->spi_data_tx_buf_offset;
+ u16 remaining_len = tc6->ongoing_tx_skb->len - tc6->tx_skb_offset;
+ u8 *tx_skb_data = tc6->ongoing_tx_skb->data + tc6->tx_skb_offset;
+ enum oa_tc6_data_start_valid_info start_valid;
+ u8 end_byte_offset = 0;
+ u16 length_to_copy;
+
+	/* The initial value is assigned here to keep the declaration line
+	 * within 80 columns.
+	 */
+ start_valid = OA_TC6_DATA_START_INVALID;
+
+ /* Set start valid if the current tx chunk contains the start of the tx
+ * ethernet frame.
+ */
+ if (!tc6->tx_skb_offset)
+ start_valid = OA_TC6_DATA_START_VALID;
+
+	/* If the remaining tx skb length is more than the chunk payload size
+	 * of 64 bytes, copy only 64 bytes and leave the rest of the ongoing tx
+	 * skb for the next tx chunk.
+	 */
+ length_to_copy = min_t(u16, remaining_len, OA_TC6_CHUNK_PAYLOAD_SIZE);
+
+ /* Copy the tx skb data to the tx chunk payload buffer */
+ memcpy(tx_buf + 1, tx_skb_data, length_to_copy);
+ tc6->tx_skb_offset += length_to_copy;
+
+ /* Set end valid if the current tx chunk contains the end of the tx
+ * ethernet frame.
+ */
+ if (tc6->ongoing_tx_skb->len == tc6->tx_skb_offset) {
+ end_valid = OA_TC6_DATA_END_VALID;
+ end_byte_offset = length_to_copy - 1;
+ tc6->tx_skb_offset = 0;
+ tc6->netdev->stats.tx_bytes += tc6->ongoing_tx_skb->len;
+ tc6->netdev->stats.tx_packets++;
+ kfree_skb(tc6->ongoing_tx_skb);
+ tc6->ongoing_tx_skb = NULL;
+ }
+
+ *tx_buf = oa_tc6_prepare_data_header(OA_TC6_DATA_VALID, start_valid,
+ end_valid, end_byte_offset);
+ tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE;
+}
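+
+/* Worked example (illustrative): a 150-byte frame is emitted as three tx
+ * chunks: chunk 0 with start_valid set and the first 64 payload bytes,
+ * chunk 1 with the next 64 bytes, and chunk 2 with the remaining 22 bytes,
+ * end_valid set and end_byte_offset = 21.
+ */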
+
+static u16 oa_tc6_prepare_spi_tx_buf_for_tx_skbs(struct oa_tc6 *tc6)
+{
+ u16 used_tx_credits;
+
+ /* Get tx skbs and convert them into tx chunks based on the tx credits
+ * available.
+ */
+ for (used_tx_credits = 0; used_tx_credits < tc6->tx_credits;
+ used_tx_credits++) {
+ if (!tc6->ongoing_tx_skb) {
+ tc6->ongoing_tx_skb = tc6->waiting_tx_skb;
+ tc6->waiting_tx_skb = NULL;
+ }
+ if (!tc6->ongoing_tx_skb)
+ break;
+ oa_tc6_add_tx_skb_to_spi_buf(tc6);
+ }
+
+ return used_tx_credits * OA_TC6_CHUNK_SIZE;
+}
+
+static void oa_tc6_add_empty_chunks_to_spi_buf(struct oa_tc6 *tc6,
+ u16 needed_empty_chunks)
+{
+ __be32 header;
+
+ header = oa_tc6_prepare_data_header(OA_TC6_DATA_INVALID,
+ OA_TC6_DATA_START_INVALID,
+ OA_TC6_DATA_END_INVALID, 0);
+
+ while (needed_empty_chunks--) {
+ __be32 *tx_buf = tc6->spi_data_tx_buf +
+ tc6->spi_data_tx_buf_offset;
+
+ *tx_buf = header;
+ tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE;
+ }
+}
+
+static u16 oa_tc6_prepare_spi_tx_buf_for_rx_chunks(struct oa_tc6 *tc6, u16 len)
+{
+ u16 tx_chunks = len / OA_TC6_CHUNK_SIZE;
+ u16 needed_empty_chunks;
+
+ /* If there are more chunks to receive than to transmit, we need to add
+ * enough empty tx chunks to allow the reception of the excess rx
+ * chunks.
+ */
+ if (tx_chunks >= tc6->rx_chunks_available)
+ return len;
+
+ needed_empty_chunks = tc6->rx_chunks_available - tx_chunks;
+
+ oa_tc6_add_empty_chunks_to_spi_buf(tc6, needed_empty_chunks);
+
+ return needed_empty_chunks * OA_TC6_CHUNK_SIZE + len;
+}
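+
+/* Worked example (illustrative): with two tx chunks already prepared and
+ * rx_chunks_available = 5 reported by the last footer, three empty tx
+ * chunks are appended so the full-duplex transfer clocks in all five
+ * pending rx chunks.
+ */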
+
+static int oa_tc6_try_spi_transfer(struct oa_tc6 *tc6)
+{
+ int ret;
+
+ while (true) {
+ u16 spi_len = 0;
+
+ tc6->spi_data_tx_buf_offset = 0;
+
+ if (tc6->ongoing_tx_skb || tc6->waiting_tx_skb)
+ spi_len = oa_tc6_prepare_spi_tx_buf_for_tx_skbs(tc6);
+
+ spi_len = oa_tc6_prepare_spi_tx_buf_for_rx_chunks(tc6, spi_len);
+
+ if (tc6->int_flag) {
+ tc6->int_flag = false;
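+			/* An interrupt with nothing queued still requires one
+			 * empty chunk on the wire, so the rx footer can report
+			 * the cause (tx credits, rx chunks or extended status).
+			 */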
+ if (spi_len == 0) {
+ oa_tc6_add_empty_chunks_to_spi_buf(tc6, 1);
+ spi_len = OA_TC6_CHUNK_SIZE;
+ }
+ }
+
+ if (spi_len == 0)
+ break;
+
+ ret = oa_tc6_spi_transfer(tc6, OA_TC6_DATA_HEADER, spi_len);
+ if (ret) {
+ netdev_err(tc6->netdev, "SPI data transfer failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = oa_tc6_process_spi_data_rx_buf(tc6, spi_len);
+ if (ret) {
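+			/* -EAGAIN means an rx buffer overflow was reported;
+			 * restart the transfer loop to resync on a new frame.
+			 */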
+ if (ret == -EAGAIN)
+ continue;
+
+ oa_tc6_cleanup_ongoing_tx_skb(tc6);
+ oa_tc6_cleanup_ongoing_rx_skb(tc6);
+ netdev_err(tc6->netdev, "Device error: %d\n", ret);
+ return ret;
+ }
+
+ if (!tc6->waiting_tx_skb && netif_queue_stopped(tc6->netdev))
+ netif_wake_queue(tc6->netdev);
+ }
+
+ return 0;
+}
+
+static int oa_tc6_spi_thread_handler(void *data)
+{
+ struct oa_tc6 *tc6 = data;
+ int ret;
+
+ while (likely(!kthread_should_stop())) {
+		/* This kthread is woken up when there is a tx skb to send or a
+		 * MAC-PHY interrupt to handle, and then performs the SPI
+		 * transfer of tx chunks.
+		 */
+ wait_event_interruptible(tc6->spi_wq, tc6->waiting_tx_skb ||
+ tc6->int_flag ||
+ kthread_should_stop());
+
+ if (kthread_should_stop())
+ break;
+
+ ret = oa_tc6_try_spi_transfer(tc6);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int oa_tc6_update_buffer_status_from_register(struct oa_tc6 *tc6)
+{
+ u32 value;
+ int ret;
+
+	/* Initially, tx credits and rx chunks available are read from the
+	 * register, as no data transfer has been performed yet. Later they are
+	 * updated from the rx footer.
+	 */
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_BUFFER_STATUS, &value);
+ if (ret)
+ return ret;
+
+ tc6->tx_credits = FIELD_GET(BUFFER_STATUS_TX_CREDITS_AVAILABLE, value);
+ tc6->rx_chunks_available = FIELD_GET(BUFFER_STATUS_RX_CHUNKS_AVAILABLE,
+ value);
+
+ return 0;
+}
+
+static irqreturn_t oa_tc6_macphy_isr(int irq, void *data)
+{
+ struct oa_tc6 *tc6 = data;
+
+ /* MAC-PHY interrupt can occur for the following reasons.
+ * - availability of tx credits if it was 0 before and not reported in
+ * the previous rx footer.
+ * - availability of rx chunks if it was 0 before and not reported in
+ * the previous rx footer.
+ * - extended status event not reported in the previous rx footer.
+ */
+ tc6->int_flag = true;
+ /* Wake spi kthread to perform spi transfer */
+ wake_up_interruptible(&tc6->spi_wq);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * oa_tc6_zero_align_receive_frame_enable - enable the zero-align receive
+ * frame feature.
+ * @tc6: oa_tc6 struct.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int oa_tc6_zero_align_receive_frame_enable(struct oa_tc6 *tc6)
+{
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &regval);
+ if (ret)
+ return ret;
+
+ /* Set Zero-Align Receive Frame Enable */
+ regval |= CONFIG0_ZARFE_ENABLE;
+
+ return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, regval);
+}
+EXPORT_SYMBOL_GPL(oa_tc6_zero_align_receive_frame_enable);
+
+/**
+ * oa_tc6_start_xmit - function for sending the tx skb which contains an
+ * ethernet frame.
+ * @tc6: oa_tc6 struct.
+ * @skb: socket buffer in which the ethernet frame is stored.
+ *
+ * Return: NETDEV_TX_OK if the transmit skb was consumed, otherwise
+ * NETDEV_TX_BUSY.
+ */
+netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb)
+{
+ if (tc6->waiting_tx_skb) {
+ netif_stop_queue(tc6->netdev);
+ return NETDEV_TX_BUSY;
+ }
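+
+	/* Only one tx skb is staged at a time; once the SPI thread has
+	 * consumed it, oa_tc6_try_spi_transfer() wakes the queue again.
+	 */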
+
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ tc6->netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ tc6->waiting_tx_skb = skb;
+
+ /* Wake spi kthread to perform spi transfer */
+ wake_up_interruptible(&tc6->spi_wq);
+
+ return NETDEV_TX_OK;
+}
+EXPORT_SYMBOL_GPL(oa_tc6_start_xmit);
+
+/**
+ * oa_tc6_init - allocates and initializes oa_tc6 structure.
+ * @spi: device with which data will be exchanged.
+ * @netdev: network device interface structure.
+ *
+ * Return: pointer to the oa_tc6 structure on successful MAC-PHY
+ * initialization, otherwise NULL.
+ */
+struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev)
+{
+ struct oa_tc6 *tc6;
+ int ret;
+
+ tc6 = devm_kzalloc(&spi->dev, sizeof(*tc6), GFP_KERNEL);
+ if (!tc6)
+ return NULL;
+
+ tc6->spi = spi;
+ tc6->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &spi->dev);
+ mutex_init(&tc6->spi_ctrl_lock);
+
+ /* Set the SPI controller to pump at realtime priority */
+ tc6->spi->rt = true;
+ spi_setup(tc6->spi);
+
+ tc6->spi_ctrl_tx_buf = devm_kzalloc(&tc6->spi->dev,
+ OA_TC6_CTRL_SPI_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tc6->spi_ctrl_tx_buf)
+ return NULL;
+
+ tc6->spi_ctrl_rx_buf = devm_kzalloc(&tc6->spi->dev,
+ OA_TC6_CTRL_SPI_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tc6->spi_ctrl_rx_buf)
+ return NULL;
+
+ tc6->spi_data_tx_buf = devm_kzalloc(&tc6->spi->dev,
+ OA_TC6_SPI_DATA_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tc6->spi_data_tx_buf)
+ return NULL;
+
+ tc6->spi_data_rx_buf = devm_kzalloc(&tc6->spi->dev,
+ OA_TC6_SPI_DATA_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tc6->spi_data_rx_buf)
+ return NULL;
+
+ ret = oa_tc6_sw_reset_macphy(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev,
+ "MAC-PHY software reset failed: %d\n", ret);
+ return NULL;
+ }
+
+ ret = oa_tc6_unmask_macphy_error_interrupts(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev,
+ "MAC-PHY error interrupts unmask failed: %d\n", ret);
+ return NULL;
+ }
+
+ ret = oa_tc6_phy_init(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev,
+ "MAC internal PHY initialization failed: %d\n", ret);
+ return NULL;
+ }
+
+ ret = oa_tc6_enable_data_transfer(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev, "Failed to enable data transfer: %d\n",
+ ret);
+ goto phy_exit;
+ }
+
+ ret = oa_tc6_update_buffer_status_from_register(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev,
+ "Failed to update buffer status: %d\n", ret);
+ goto phy_exit;
+ }
+
+ init_waitqueue_head(&tc6->spi_wq);
+
+ tc6->spi_thread = kthread_run(oa_tc6_spi_thread_handler, tc6,
+ "oa-tc6-spi-thread");
+ if (IS_ERR(tc6->spi_thread)) {
+ dev_err(&tc6->spi->dev, "Failed to create SPI thread\n");
+ goto phy_exit;
+ }
+
+ sched_set_fifo(tc6->spi_thread);
+
+ ret = devm_request_irq(&tc6->spi->dev, tc6->spi->irq, oa_tc6_macphy_isr,
+ IRQF_TRIGGER_FALLING, dev_name(&tc6->spi->dev),
+ tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev, "Failed to request macphy isr %d\n",
+ ret);
+ goto kthread_stop;
+ }
+
+	/* oa_tc6_sw_reset_macphy() above reset the MAC-PHY and cleared the
+	 * reset complete status. The IRQ is also asserted on reset completion
+	 * and remains asserted until the MAC-PHY receives a data chunk, so an
+	 * empty data chunk transmission is performed here to deassert it.
+	 * Refer to sections 7.7 and 9.2.8.8 in the OPEN Alliance specification
+	 * for more details.
+	 */
+ tc6->int_flag = true;
+ wake_up_interruptible(&tc6->spi_wq);
+
+ return tc6;
+
+kthread_stop:
+ kthread_stop(tc6->spi_thread);
+phy_exit:
+ oa_tc6_phy_exit(tc6);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(oa_tc6_init);
+
+/**
+ * oa_tc6_exit - exit function that stops the SPI thread, detaches the PHY
+ * and frees any pending skbs.
+ * @tc6: oa_tc6 struct.
+ */
+void oa_tc6_exit(struct oa_tc6 *tc6)
+{
+ oa_tc6_phy_exit(tc6);
+ kthread_stop(tc6->spi_thread);
+ dev_kfree_skb_any(tc6->ongoing_tx_skb);
+ dev_kfree_skb_any(tc6->waiting_tx_skb);
+ dev_kfree_skb_any(tc6->rx_skb);
+}
+EXPORT_SYMBOL_GPL(oa_tc6_exit);
+
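+/* Usage sketch (illustrative only; the consumer driver and its callback
+ * names are hypothetical): a MAC driver built on this library would
+ * typically wire the exported helpers as follows:
+ *
+ *   probe:          tc6 = oa_tc6_init(spi, netdev);
+ *                   oa_tc6_zero_align_receive_frame_enable(tc6);
+ *   ndo_start_xmit: return oa_tc6_start_xmit(tc6, skb);
+ *   remove:         oa_tc6_exit(tc6);
+ */
+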
+MODULE_DESCRIPTION("OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface Lib");
+MODULE_AUTHOR("Parthiban Veerasooran <parthiban.veerasooran@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 1cc001087193..a36d422b5173 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -163,7 +163,7 @@ static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
#include <linux/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <asm/cache.h>
static const char version[] =
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 640ac01689fb..c0515dc63246 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -102,7 +102,7 @@ static int gx_fix;
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <asm/io.h>
/* These identify the driver base version and may not be removed. */
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 62ba269da902..cb4e12df7719 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1699,8 +1699,9 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &mac->napi, pasemi_mac_poll);
- dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG |
- NETIF_F_HIGHDMA | NETIF_F_GSO;
+ dev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_GSO;
+ dev->lltx = true;
mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
if (!mac->dma_pdev) {
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
index 3f7519e435b8..01fe76786f77 100644
--- a/drivers/net/ethernet/pensando/Kconfig
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -23,6 +23,7 @@ config IONIC
depends on PTP_1588_CLOCK_OPTIONAL
select NET_DEVLINK
select DIMLIB
+ select PAGE_POOL
help
This enables the support for the Pensando family of Ethernet
adapters. More specific information on this driver can be
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index 59e5a9f21105..c98b4e75e288 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -123,7 +123,7 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
struct ionic_cq *cq = &qcq->cq;
qcq_dentry = debugfs_create_dir(q->name, lif->dentry);
- if (IS_ERR_OR_NULL(qcq_dentry))
+ if (IS_ERR(qcq_dentry))
return;
qcq->dentry = qcq_dentry;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index c647033f3ad2..c8c710cfe70c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -32,7 +32,7 @@
#define IONIC_ADMIN_DOORBELL_DEADLINE (HZ / 2) /* 500ms */
#define IONIC_TX_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
#define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
-#define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 5) /* 5s */
+#define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 4) /* 4s */
struct ionic_dev_bar {
void __iomem *vaddr;
@@ -181,10 +181,7 @@ struct ionic_queue;
struct ionic_qcq;
#define IONIC_MAX_BUF_LEN ((u16)-1)
-#define IONIC_PAGE_SIZE PAGE_SIZE
-#define IONIC_PAGE_SPLIT_SZ (PAGE_SIZE / 2)
-#define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\
- __GFP_COMP | __GFP_MEMALLOC)
+#define IONIC_PAGE_SIZE MIN(PAGE_SIZE, IONIC_MAX_BUF_LEN)
#define IONIC_XDP_MAX_LINEAR_MTU (IONIC_PAGE_SIZE - \
(VLAN_ETH_HLEN + \
@@ -238,9 +235,8 @@ struct ionic_queue {
unsigned int index;
unsigned int num_descs;
unsigned int max_sg_elems;
+
u64 features;
- unsigned int type;
- unsigned int hw_index;
unsigned int hw_type;
bool xdp_flush;
union {
@@ -250,18 +246,23 @@ struct ionic_queue {
struct ionic_admin_cmd *adminq;
};
union {
- void __iomem *cmb_base;
- struct ionic_txq_desc __iomem *cmb_txq;
- struct ionic_rxq_desc __iomem *cmb_rxq;
- };
- union {
void *sg_base;
struct ionic_txq_sg_desc *txq_sgl;
struct ionic_txq_sg_desc_v1 *txq_sgl_v1;
struct ionic_rxq_sg_desc *rxq_sgl;
};
struct xdp_rxq_info *xdp_rxq_info;
+ struct bpf_prog *xdp_prog;
+ struct page_pool *page_pool;
struct ionic_queue *partner;
+
+ union {
+ void __iomem *cmb_base;
+ struct ionic_txq_desc __iomem *cmb_txq;
+ struct ionic_rxq_desc __iomem *cmb_rxq;
+ };
+ unsigned int type;
+ unsigned int hw_index;
dma_addr_t base_pa;
dma_addr_t cmb_base_pa;
dma_addr_t sg_base_pa;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 4619fd74f3e3..dda22fa4448c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -989,8 +989,6 @@ static int ionic_get_ts_info(struct net_device *netdev,
info->phc_index = ptp_clock_index(lif->phc->ptp);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index aa0cc31dfe6e..40496587b2b3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -13,6 +13,7 @@
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
#include <linux/vmalloc.h>
+#include <net/page_pool/helpers.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -46,8 +47,9 @@ static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);
-static int ionic_xdp_queues_config(struct ionic_lif *lif);
-static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q);
+static void ionic_xdp_rxqs_prog_update(struct ionic_lif *lif);
+static void ionic_unregister_rxq_info(struct ionic_queue *q);
+static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id);
static void ionic_dim_work(struct work_struct *work)
{
@@ -380,6 +382,7 @@ static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
if (!(qcq->flags & IONIC_QCQ_F_INITED))
return;
+ ionic_unregister_rxq_info(&qcq->q);
if (qcq->flags & IONIC_QCQ_F_INTR) {
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
@@ -437,9 +440,10 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->sg_base_pa = 0;
}
- ionic_xdp_unregister_rxq_info(&qcq->q);
- ionic_qcq_intr_free(lif, qcq);
+ page_pool_destroy(qcq->q.page_pool);
+ qcq->q.page_pool = NULL;
+ ionic_qcq_intr_free(lif, qcq);
vfree(qcq->q.info);
qcq->q.info = NULL;
}
@@ -553,7 +557,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
unsigned int cq_desc_size,
unsigned int sg_desc_size,
unsigned int desc_info_size,
- unsigned int pid, struct ionic_qcq **qcq)
+ unsigned int pid, struct bpf_prog *xdp_prog,
+ struct ionic_qcq **qcq)
{
struct ionic_dev *idev = &lif->ionic->idev;
struct device *dev = lif->ionic->dev;
@@ -579,6 +584,31 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
goto err_out_free_qcq;
}
+ if (type == IONIC_QTYPE_RXQ) {
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = 0,
+ .pool_size = num_descs,
+ .nid = NUMA_NO_NODE,
+ .dev = lif->ionic->dev,
+ .napi = &new->napi,
+ .dma_dir = DMA_FROM_DEVICE,
+ .max_len = PAGE_SIZE,
+ .netdev = lif->netdev,
+ };
+
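+		/* XDP_TX re-posts pool pages to the device for transmit, so
+		 * the pool mapping must be bidirectional when an XDP program
+		 * is attached.
+		 */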
+ if (xdp_prog)
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+
+ new->q.page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(new->q.page_pool)) {
+ netdev_err(lif->netdev, "Cannot create page_pool\n");
+ err = PTR_ERR(new->q.page_pool);
+ new->q.page_pool = NULL;
+ goto err_out_free_q_info;
+ }
+ }
+
new->q.type = type;
new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;
@@ -586,12 +616,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
desc_size, sg_desc_size, pid);
if (err) {
netdev_err(lif->netdev, "Cannot initialize queue\n");
- goto err_out_free_q_info;
+ goto err_out_free_page_pool;
}
err = ionic_alloc_qcq_interrupt(lif, new);
if (err)
- goto err_out_free_q_info;
+ goto err_out_free_page_pool;
err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
if (err) {
@@ -712,6 +742,8 @@ err_out_free_irq:
devm_free_irq(dev, new->intr.vector, &new->napi);
ionic_intr_free(lif->ionic, new->intr.index);
}
+err_out_free_page_pool:
+ page_pool_destroy(new->q.page_pool);
err_out_free_q_info:
vfree(new->q.info);
err_out_free_qcq:
@@ -734,7 +766,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
sizeof(struct ionic_admin_comp),
0,
sizeof(struct ionic_admin_desc_info),
- lif->kern_pid, &lif->adminqcq);
+ lif->kern_pid, NULL, &lif->adminqcq);
if (err)
return err;
ionic_debugfs_add_qcq(lif, lif->adminqcq);
@@ -747,7 +779,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
sizeof(union ionic_notifyq_comp),
0,
sizeof(struct ionic_admin_desc_info),
- lif->kern_pid, &lif->notifyqcq);
+ lif->kern_pid, NULL, &lif->notifyqcq);
if (err)
goto err_out;
ionic_debugfs_add_qcq(lif, lif->notifyqcq);
@@ -925,6 +957,11 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
else
netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
+ err = ionic_register_rxq_info(q, qcq->napi.napi_id);
+ if (err) {
+ netif_napi_del(&qcq->napi);
+ return err;
+ }
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -960,7 +997,7 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
- lif->kern_pid, &txq);
+ lif->kern_pid, NULL, &txq);
if (err)
goto err_qcq_alloc;
@@ -1020,7 +1057,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
- lif->kern_pid, &rxq);
+ lif->kern_pid, NULL, &rxq);
if (err)
goto err_qcq_alloc;
@@ -1037,7 +1074,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
goto err_qcq_init;
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
- ionic_rx_fill(&rxq->q);
+ ionic_rx_fill(&rxq->q, NULL);
err = ionic_qcq_enable(rxq);
if (err)
goto err_qcq_enable;
@@ -2046,7 +2083,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
- lif->kern_pid, &lif->txqcqs[i]);
+ lif->kern_pid, NULL, &lif->txqcqs[i]);
if (err)
goto err_out;
@@ -2078,7 +2115,8 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
- lif->kern_pid, &lif->rxqcqs[i]);
+ lif->kern_pid, lif->xdp_prog,
+ &lif->rxqcqs[i]);
if (err)
goto err_out;
@@ -2143,9 +2181,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
int derr = 0;
int i, err;
- err = ionic_xdp_queues_config(lif);
- if (err)
- return err;
+ ionic_xdp_rxqs_prog_update(lif);
for (i = 0; i < lif->nxqs; i++) {
if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
@@ -2154,7 +2190,8 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
goto err_out;
}
- ionic_rx_fill(&lif->rxqcqs[i]->q);
+ ionic_rx_fill(&lif->rxqcqs[i]->q,
+ READ_ONCE(lif->rxqcqs[i]->q.xdp_prog));
err = ionic_qcq_enable(lif->rxqcqs[i]);
if (err)
goto err_out;
@@ -2167,7 +2204,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
}
if (lif->hwstamp_rxq) {
- ionic_rx_fill(&lif->hwstamp_rxq->q);
+ ionic_rx_fill(&lif->hwstamp_rxq->q, NULL);
err = ionic_qcq_enable(lif->hwstamp_rxq);
if (err)
goto err_out_hwstamp_rx;
@@ -2192,7 +2229,7 @@ err_out:
derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
}
- ionic_xdp_queues_config(lif);
+ ionic_xdp_rxqs_prog_update(lif);
return err;
}
@@ -2651,7 +2688,7 @@ static void ionic_vf_attr_replay(struct ionic_lif *lif)
ionic_vf_start(ionic);
}
-static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q)
+static void ionic_unregister_rxq_info(struct ionic_queue *q)
{
struct xdp_rxq_info *xi;
@@ -2665,7 +2702,7 @@ static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q)
kfree(xi);
}
-static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
+static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
{
struct xdp_rxq_info *rxq_info;
int err;
@@ -2676,15 +2713,15 @@ static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_
err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id);
if (err) {
- dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n",
- q->index, err);
+ netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg failed, err %d\n",
+ q->index, err);
goto err_out;
}
- err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL);
+ err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_POOL, q->page_pool);
if (err) {
- dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n",
- q->index, err);
+ netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg_mem_model failed, err %d\n",
+ q->index, err);
xdp_rxq_info_unreg(rxq_info);
goto err_out;
}
@@ -2698,44 +2735,20 @@ err_out:
return err;
}
-static int ionic_xdp_queues_config(struct ionic_lif *lif)
+static void ionic_xdp_rxqs_prog_update(struct ionic_lif *lif)
{
+ struct bpf_prog *xdp_prog;
unsigned int i;
- int err;
if (!lif->rxqcqs)
- return 0;
-
- /* There's no need to rework memory if not going to/from NULL program.
- * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info
- * This way we don't need to keep an *xdp_prog in every queue struct.
- */
- if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info)
- return 0;
+ return;
+ xdp_prog = READ_ONCE(lif->xdp_prog);
for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
struct ionic_queue *q = &lif->rxqcqs[i]->q;
- if (q->xdp_rxq_info) {
- ionic_xdp_unregister_rxq_info(q);
- continue;
- }
-
- err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id);
- if (err) {
- dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n",
- i, err);
- goto err_out;
- }
+ WRITE_ONCE(q->xdp_prog, xdp_prog);
}
-
- return 0;
-
-err_out:
- for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++)
- ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q);
-
- return err;
}
static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf)
@@ -2765,11 +2778,17 @@ static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf)
if (!netif_running(netdev)) {
old_prog = xchg(&lif->xdp_prog, bpf->prog);
+ } else if (lif->xdp_prog && bpf->prog) {
+ old_prog = xchg(&lif->xdp_prog, bpf->prog);
+ ionic_xdp_rxqs_prog_update(lif);
} else {
+ struct ionic_queue_params qparams;
+
+ ionic_init_queue_params(lif, &qparams);
+ qparams.xdp_prog = bpf->prog;
mutex_lock(&lif->queue_lock);
- ionic_stop_queues_reconfig(lif);
+ ionic_reconfigure_queues(lif, &qparams);
old_prog = xchg(&lif->xdp_prog, bpf->prog);
- ionic_start_queues_reconfig(lif);
mutex_unlock(&lif->queue_lock);
}
@@ -2871,13 +2890,23 @@ err_out:
static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
{
- /* only swapping the queues, not the napi, flags, or other stuff */
+ /* only swapping the queues and napi, not flags or other stuff */
+ swap(a->napi, b->napi);
+
+ if (a->q.type == IONIC_QTYPE_RXQ) {
+ swap(a->q.page_pool, b->q.page_pool);
+ a->q.page_pool->p.napi = &a->napi;
+ if (b->q.page_pool) /* is NULL when increasing queue count */
+ b->q.page_pool->p.napi = &b->napi;
+ }
+
swap(a->q.features, b->q.features);
swap(a->q.num_descs, b->q.num_descs);
swap(a->q.desc_size, b->q.desc_size);
swap(a->q.base, b->q.base);
swap(a->q.base_pa, b->q.base_pa);
swap(a->q.info, b->q.info);
+ swap(a->q.xdp_prog, b->q.xdp_prog);
swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info);
swap(a->q.partner, b->q.partner);
swap(a->q_base, b->q_base);
@@ -2928,7 +2957,8 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
}
if (qparam->nxqs != lif->nxqs ||
qparam->nrxq_descs != lif->nrxq_descs ||
- qparam->rxq_features != lif->rxq_features) {
+ qparam->rxq_features != lif->rxq_features ||
+ qparam->xdp_prog != lif->xdp_prog) {
rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
sizeof(struct ionic_qcq *), GFP_KERNEL);
if (!rx_qcqs) {
@@ -2959,7 +2989,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
4, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
- lif->kern_pid, &lif->txqcqs[i]);
+ lif->kern_pid, NULL, &lif->txqcqs[i]);
if (err)
goto err_out;
}
@@ -2968,7 +2998,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
- lif->kern_pid, &tx_qcqs[i]);
+ lif->kern_pid, NULL, &tx_qcqs[i]);
if (err)
goto err_out;
}
@@ -2990,7 +3020,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
4, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
- lif->kern_pid, &lif->rxqcqs[i]);
+ lif->kern_pid, NULL, &lif->rxqcqs[i]);
if (err)
goto err_out;
}
@@ -2999,11 +3029,12 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
- lif->kern_pid, &rx_qcqs[i]);
+ lif->kern_pid, qparam->xdp_prog, &rx_qcqs[i]);
if (err)
goto err_out;
rx_qcqs[i]->q.features = qparam->rxq_features;
+ rx_qcqs[i]->q.xdp_prog = qparam->xdp_prog;
}
}
@@ -3220,7 +3251,7 @@ int ionic_lif_alloc(struct ionic *ionic)
netdev->netdev_ops = &ionic_netdev_ops;
ionic_ethtool_set_ops(netdev);
- netdev->watchdog_timeo = 2 * HZ;
+ netdev->watchdog_timeo = 5 * HZ;
netif_carrier_off(netdev);
lif->identity = lid;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 3e1005293c4a..e01756fb7fdd 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -268,6 +268,7 @@ struct ionic_queue_params {
unsigned int ntxq_descs;
unsigned int nrxq_descs;
u64 rxq_features;
+ struct bpf_prog *xdp_prog;
bool intr_split;
bool cmb_tx;
bool cmb_rx;
@@ -280,6 +281,7 @@ static inline void ionic_init_queue_params(struct ionic_lif *lif,
qparam->ntxq_descs = lif->ntxq_descs;
qparam->nrxq_descs = lif->nrxq_descs;
qparam->rxq_features = lif->rxq_features;
+ qparam->xdp_prog = lif->xdp_prog;
qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
qparam->cmb_tx = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
qparam->cmb_rx = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index 1ee2f285cb42..528114877677 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -312,8 +312,8 @@ static int ionic_lif_filter_add(struct ionic_lif *lif,
int err = 0;
ctx.cmd.rx_filter_add = *ac;
- ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD,
- ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index),
+ ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD;
+ ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index);
spin_lock_bh(&lif->rx_filters.lock);
f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index fc79baad4561..0eeda7e502db 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -6,6 +6,7 @@
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include <net/netdev_queues.h>
+#include <net/page_pool/helpers.h>
#include "ionic.h"
#include "ionic_lif.h"
@@ -118,108 +119,57 @@ static void *ionic_rx_buf_va(struct ionic_buf_info *buf_info)
static dma_addr_t ionic_rx_buf_pa(struct ionic_buf_info *buf_info)
{
- return buf_info->dma_addr + buf_info->page_offset;
+ return page_pool_get_dma_addr(buf_info->page) + buf_info->page_offset;
}
-static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info)
+static void __ionic_rx_put_buf(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info,
+ bool recycle_direct)
{
- return min_t(u32, IONIC_MAX_BUF_LEN, IONIC_PAGE_SIZE - buf_info->page_offset);
-}
-
-static int ionic_rx_page_alloc(struct ionic_queue *q,
- struct ionic_buf_info *buf_info)
-{
- struct device *dev = q->dev;
- dma_addr_t dma_addr;
- struct page *page;
-
- page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
- if (unlikely(!page)) {
- net_err_ratelimited("%s: %s page alloc failed\n",
- dev_name(dev), q->name);
- q_to_rx_stats(q)->alloc_err++;
- return -ENOMEM;
- }
-
- dma_addr = dma_map_page(dev, page, 0,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, dma_addr))) {
- __free_pages(page, 0);
- net_err_ratelimited("%s: %s dma map failed\n",
- dev_name(dev), q->name);
- q_to_rx_stats(q)->dma_map_err++;
- return -EIO;
- }
-
- buf_info->dma_addr = dma_addr;
- buf_info->page = page;
- buf_info->page_offset = 0;
-
- return 0;
-}
-
-static void ionic_rx_page_free(struct ionic_queue *q,
- struct ionic_buf_info *buf_info)
-{
- struct device *dev = q->dev;
-
- if (unlikely(!buf_info)) {
- net_err_ratelimited("%s: %s invalid buf_info in free\n",
- dev_name(dev), q->name);
- return;
- }
-
if (!buf_info->page)
return;
- dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- __free_pages(buf_info->page, 0);
+ page_pool_put_full_page(q->page_pool, buf_info->page, recycle_direct);
buf_info->page = NULL;
+ buf_info->len = 0;
+ buf_info->page_offset = 0;
}
-static bool ionic_rx_buf_recycle(struct ionic_queue *q,
- struct ionic_buf_info *buf_info, u32 len)
-{
- u32 size;
-
- /* don't re-use pages allocated in low-mem condition */
- if (page_is_pfmemalloc(buf_info->page))
- return false;
-
- /* don't re-use buffers from non-local numa nodes */
- if (page_to_nid(buf_info->page) != numa_mem_id())
- return false;
-
- size = ALIGN(len, q->xdp_rxq_info ? IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ);
- buf_info->page_offset += size;
- if (buf_info->page_offset >= IONIC_PAGE_SIZE)
- return false;
- get_page(buf_info->page);
+static void ionic_rx_put_buf(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info)
+{
+ __ionic_rx_put_buf(q, buf_info, false);
+}
- return true;
+static void ionic_rx_put_buf_direct(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info)
+{
+ __ionic_rx_put_buf(q, buf_info, true);
}
static void ionic_rx_add_skb_frag(struct ionic_queue *q,
struct sk_buff *skb,
struct ionic_buf_info *buf_info,
- u32 off, u32 len,
+ u32 headroom, u32 len,
bool synced)
{
if (!synced)
- dma_sync_single_range_for_cpu(q->dev, ionic_rx_buf_pa(buf_info),
- off, len, DMA_FROM_DEVICE);
+ page_pool_dma_sync_for_cpu(q->page_pool,
+ buf_info->page,
+ buf_info->page_offset + headroom,
+ len);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf_info->page, buf_info->page_offset + off,
- len,
- IONIC_PAGE_SIZE);
+ buf_info->page, buf_info->page_offset + headroom,
+ len, buf_info->len);
- if (!ionic_rx_buf_recycle(q, buf_info, len)) {
- dma_unmap_page(q->dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- buf_info->page = NULL;
- }
+ /* napi_gro_frags() will release/recycle the
+ * page_pool buffers from the frags list
+ */
+ buf_info->page = NULL;
+ buf_info->len = 0;
+ buf_info->page_offset = 0;
}
static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
@@ -244,12 +194,13 @@ static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
q_to_rx_stats(q)->alloc_err++;
return NULL;
}
+ skb_mark_for_recycle(skb);
if (headroom)
frag_len = min_t(u16, len,
IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
else
- frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+ frag_len = min_t(u16, len, IONIC_PAGE_SIZE);
if (unlikely(!buf_info->page))
goto err_bad_buf_page;
@@ -260,7 +211,7 @@ static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
for (i = 0; i < num_sg_elems; i++, buf_info++) {
if (unlikely(!buf_info->page))
goto err_bad_buf_page;
- frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+ frag_len = min_t(u16, len, buf_info->len);
ionic_rx_add_skb_frag(q, skb, buf_info, 0, frag_len, synced);
len -= frag_len;
}
@@ -277,11 +228,13 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
struct ionic_rx_desc_info *desc_info,
unsigned int headroom,
unsigned int len,
+ unsigned int num_sg_elems,
bool synced)
{
struct ionic_buf_info *buf_info;
struct device *dev = q->dev;
struct sk_buff *skb;
+ int i;
buf_info = &desc_info->bufs[0];
@@ -292,54 +245,52 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
q_to_rx_stats(q)->alloc_err++;
return NULL;
}
-
- if (unlikely(!buf_info->page)) {
- dev_kfree_skb(skb);
- return NULL;
- }
+ skb_mark_for_recycle(skb);
if (!synced)
- dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
- headroom, len, DMA_FROM_DEVICE);
+ page_pool_dma_sync_for_cpu(q->page_pool,
+ buf_info->page,
+ buf_info->page_offset + headroom,
+ len);
+
skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len);
- dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info),
- headroom, len, DMA_FROM_DEVICE);
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, netdev);
+ /* recycle the Rx buffer now that we're done with it */
+ ionic_rx_put_buf_direct(q, buf_info);
+ buf_info++;
+ for (i = 0; i < num_sg_elems; i++, buf_info++)
+ ionic_rx_put_buf_direct(q, buf_info);
+
return skb;
}
static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
- struct ionic_tx_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info,
+ bool in_napi)
{
- unsigned int nbufs = desc_info->nbufs;
- struct ionic_buf_info *buf_info;
- struct device *dev = q->dev;
- int i;
+ struct xdp_frame_bulk bq;
- if (!nbufs)
+ if (!desc_info->nbufs)
return;
- buf_info = desc_info->bufs;
- dma_unmap_single(dev, buf_info->dma_addr,
- buf_info->len, DMA_TO_DEVICE);
- if (desc_info->act == XDP_TX)
- __free_pages(buf_info->page, 0);
- buf_info->page = NULL;
+ xdp_frame_bulk_init(&bq);
+ rcu_read_lock(); /* need for xdp_return_frame_bulk */
- buf_info++;
- for (i = 1; i < nbufs + 1 && buf_info->page; i++, buf_info++) {
- dma_unmap_page(dev, buf_info->dma_addr,
- buf_info->len, DMA_TO_DEVICE);
- if (desc_info->act == XDP_TX)
- __free_pages(buf_info->page, 0);
- buf_info->page = NULL;
+ if (desc_info->act == XDP_TX) {
+ if (likely(in_napi))
+ xdp_return_frame_rx_napi(desc_info->xdpf);
+ else
+ xdp_return_frame(desc_info->xdpf);
+ } else if (desc_info->act == XDP_REDIRECT) {
+ ionic_tx_desc_unmap_bufs(q, desc_info);
+ xdp_return_frame_bulk(desc_info->xdpf, &bq);
}
- if (desc_info->act == XDP_REDIRECT)
- xdp_return_frame(desc_info->xdpf);
+ xdp_flush_frame_bulk(&bq);
+ rcu_read_unlock();
desc_info->nbufs = 0;
desc_info->xdpf = NULL;
@@ -363,9 +314,17 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
buf_info = desc_info->bufs;
stats = q_to_tx_stats(q);
- dma_addr = ionic_tx_map_single(q, frame->data, len);
- if (!dma_addr)
- return -EIO;
+ if (act == XDP_TX) {
+ dma_addr = page_pool_get_dma_addr(page) +
+ off + XDP_PACKET_HEADROOM;
+ dma_sync_single_for_device(q->dev, dma_addr,
+ len, DMA_TO_DEVICE);
+ } else /* XDP_REDIRECT */ {
+ dma_addr = ionic_tx_map_single(q, frame->data, len);
+ if (!dma_addr)
+ return -EIO;
+ }
+
buf_info->dma_addr = dma_addr;
buf_info->len = len;
buf_info->page = page;
@@ -387,10 +346,21 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
frag = sinfo->frags;
elem = ionic_tx_sg_elems(q);
for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) {
- dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
- if (!dma_addr) {
- ionic_tx_desc_unmap_bufs(q, desc_info);
- return -EIO;
+ if (act == XDP_TX) {
+ struct page *pg = skb_frag_page(frag);
+
+ dma_addr = page_pool_get_dma_addr(pg) +
+ skb_frag_off(frag);
+ dma_sync_single_for_device(q->dev, dma_addr,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ } else {
+ dma_addr = ionic_tx_map_frag(q, frag, 0,
+ skb_frag_size(frag));
+ if (dma_mapping_error(q->dev, dma_addr)) {
+ ionic_tx_desc_unmap_bufs(q, desc_info);
+ return -EIO;
+ }
}
bi->dma_addr = dma_addr;
bi->len = skb_frag_size(frag);
@@ -481,15 +451,13 @@ int ionic_xdp_xmit(struct net_device *netdev, int n,
return nxmit;
}
-static void ionic_xdp_rx_put_bufs(struct ionic_queue *q,
- struct ionic_buf_info *buf_info,
- int nbufs)
+static void ionic_xdp_rx_unlink_bufs(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info,
+ int nbufs)
{
int i;
for (i = 0; i < nbufs; i++) {
- dma_unmap_page(q->dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
buf_info->page = NULL;
buf_info++;
}
@@ -516,11 +484,9 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
XDP_PACKET_HEADROOM, frag_len, false);
-
- dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
- XDP_PACKET_HEADROOM, frag_len,
- DMA_FROM_DEVICE);
-
+ page_pool_dma_sync_for_cpu(rxq->page_pool, buf_info->page,
+ buf_info->page_offset + XDP_PACKET_HEADROOM,
+ frag_len);
prefetchw(&xdp_buf.data_hard_start);
/* We limit MTU size to one buffer if !xdp_has_frags, so
@@ -542,15 +508,16 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
do {
if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
err = -ENOSPC;
- goto out_xdp_abort;
+ break;
}
frag = &sinfo->frags[sinfo->nr_frags];
sinfo->nr_frags++;
bi++;
- frag_len = min_t(u16, remain_len, ionic_rx_buf_size(bi));
- dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(bi),
- 0, frag_len, DMA_FROM_DEVICE);
+ frag_len = min_t(u16, remain_len, bi->len);
+ page_pool_dma_sync_for_cpu(rxq->page_pool, bi->page,
+ buf_info->page_offset,
+ frag_len);
skb_frag_fill_page_desc(frag, bi->page, 0, frag_len);
sinfo->xdp_frags_size += frag_len;
remain_len -= frag_len;
@@ -569,14 +536,16 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
return false; /* false = we didn't consume the packet */
case XDP_DROP:
- ionic_rx_page_free(rxq, buf_info);
+ ionic_rx_put_buf_direct(rxq, buf_info);
stats->xdp_drop++;
break;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(&xdp_buf);
- if (!xdpf)
- goto out_xdp_abort;
+ if (!xdpf) {
+ err = -ENOSPC;
+ break;
+ }
txq = rxq->partner;
nq = netdev_get_tx_queue(netdev, txq->index);
@@ -588,7 +557,8 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
ionic_q_space_avail(txq),
1, 1)) {
__netif_tx_unlock(nq);
- goto out_xdp_abort;
+ err = -EIO;
+ break;
}
err = ionic_xdp_post_frame(txq, xdpf, XDP_TX,
@@ -598,49 +568,47 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
__netif_tx_unlock(nq);
if (unlikely(err)) {
netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
- goto out_xdp_abort;
+ break;
}
- ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs);
+ ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs);
stats->xdp_tx++;
-
- /* the Tx completion will free the buffers */
break;
case XDP_REDIRECT:
err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
if (unlikely(err)) {
netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
- goto out_xdp_abort;
+ break;
}
- ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs);
+ ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs);
rxq->xdp_flush = true;
stats->xdp_redirect++;
break;
case XDP_ABORTED:
default:
- goto out_xdp_abort;
+ err = -EIO;
+ break;
}
- return true;
-
-out_xdp_abort:
- trace_xdp_exception(netdev, xdp_prog, xdp_action);
- ionic_rx_page_free(rxq, buf_info);
- stats->xdp_aborted++;
+ if (err) {
+ ionic_rx_put_buf_direct(rxq, buf_info);
+ trace_xdp_exception(netdev, xdp_prog, xdp_action);
+ stats->xdp_aborted++;
+ }
return true;
}
static void ionic_rx_clean(struct ionic_queue *q,
struct ionic_rx_desc_info *desc_info,
- struct ionic_rxq_comp *comp)
+ struct ionic_rxq_comp *comp,
+ struct bpf_prog *xdp_prog)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_qcq *qcq = q_to_qcq(q);
struct ionic_rx_stats *stats;
- struct bpf_prog *xdp_prog;
- unsigned int headroom;
+ unsigned int headroom = 0;
struct sk_buff *skb;
bool synced = false;
bool use_copybreak;
@@ -648,7 +616,14 @@ static void ionic_rx_clean(struct ionic_queue *q,
stats = q_to_rx_stats(q);
- if (comp->status) {
+ if (unlikely(comp->status)) {
+ /* Most likely status==2 and the pkt received was bigger
+ * than the buffer available: comp->len will show the
+ * pkt size received that didn't fit the advertised desc.len
+ */
+ dev_dbg(q->dev, "q%d drop comp->status %d comp->len %d desc->len %d\n",
+ q->index, comp->status, comp->len, q->rxq[q->head_idx].len);
+
stats->dropped++;
return;
}
@@ -657,18 +632,18 @@ static void ionic_rx_clean(struct ionic_queue *q,
stats->pkts++;
stats->bytes += len;
- xdp_prog = READ_ONCE(q->lif->xdp_prog);
if (xdp_prog) {
if (ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
return;
synced = true;
+ headroom = XDP_PACKET_HEADROOM;
}
- headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
use_copybreak = len <= q->lif->rx_copybreak;
if (use_copybreak)
skb = ionic_rx_copybreak(netdev, q, desc_info,
- headroom, len, synced);
+ headroom, len,
+ comp->num_sg_elems, synced);
else
skb = ionic_rx_build_skb(q, desc_info, headroom, len,
comp->num_sg_elems, synced);
@@ -744,7 +719,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
napi_gro_frags(&qcq->napi);
}
-bool ionic_rx_service(struct ionic_cq *cq)
+static bool __ionic_rx_service(struct ionic_cq *cq, struct bpf_prog *xdp_prog)
{
struct ionic_rx_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q;
@@ -766,11 +741,16 @@ bool ionic_rx_service(struct ionic_cq *cq)
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
/* clean the related q entry, only one per qc completion */
- ionic_rx_clean(q, desc_info, comp);
+ ionic_rx_clean(q, desc_info, comp, xdp_prog);
return true;
}
+bool ionic_rx_service(struct ionic_cq *cq)
+{
+ return __ionic_rx_service(cq, NULL);
+}
+
static inline void ionic_write_cmb_desc(struct ionic_queue *q,
void *desc)
{
@@ -781,7 +761,7 @@ static inline void ionic_write_cmb_desc(struct ionic_queue *q,
memcpy_toio(&q->cmb_txq[q->head_idx], desc, sizeof(q->cmb_txq[0]));
}
-void ionic_rx_fill(struct ionic_queue *q)
+void ionic_rx_fill(struct ionic_queue *q, struct bpf_prog *xdp_prog)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_rx_desc_info *desc_info;
@@ -789,6 +769,9 @@ void ionic_rx_fill(struct ionic_queue *q)
struct ionic_buf_info *buf_info;
unsigned int fill_threshold;
struct ionic_rxq_desc *desc;
+ unsigned int first_frag_len;
+ unsigned int first_buf_len;
+ unsigned int headroom = 0;
unsigned int remain_len;
unsigned int frag_len;
unsigned int nfrags;
@@ -806,35 +789,43 @@ void ionic_rx_fill(struct ionic_queue *q)
len = netdev->mtu + VLAN_ETH_HLEN;
- for (i = n_fill; i; i--) {
- unsigned int headroom;
- unsigned int buf_len;
+ if (xdp_prog) {
+		/* Always alloc the full size buffer, but only need
+		 * the actual frag_len in the descriptor.
+		 * XDP uses space in the first buffer, so account for
+		 * head room, tail room, and ip header in the first frag size.
+		 */
+ headroom = XDP_PACKET_HEADROOM;
+ first_buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN + headroom;
+ first_frag_len = min_t(u16, len + headroom, first_buf_len);
+ } else {
+ /* Use MTU size if smaller than max buffer size */
+ first_frag_len = min_t(u16, len, IONIC_PAGE_SIZE);
+ first_buf_len = first_frag_len;
+ }
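+	/* e.g. with a 1500-byte MTU and no XDP program, first_frag_len is
+	 * min(1500 + VLAN_ETH_HLEN, IONIC_PAGE_SIZE) = 1518 bytes.
+	 */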
+ for (i = n_fill; i; i--) {
+ /* fill main descriptor - buf[0] */
nfrags = 0;
remain_len = len;
desc = &q->rxq[q->head_idx];
desc_info = &q->rx_info[q->head_idx];
buf_info = &desc_info->bufs[0];
- if (!buf_info->page) { /* alloc a new buffer? */
- if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
- desc->addr = 0;
- desc->len = 0;
- return;
- }
+ buf_info->len = first_buf_len;
+ frag_len = first_frag_len - headroom;
+
+ /* get a new buffer if we can't reuse one */
+ if (!buf_info->page)
+ buf_info->page = page_pool_alloc(q->page_pool,
+ &buf_info->page_offset,
+ &buf_info->len,
+ GFP_ATOMIC);
+ if (unlikely(!buf_info->page)) {
+ buf_info->len = 0;
+ return;
}
- /* fill main descriptor - buf[0]
- * XDP uses space in the first buffer, so account for
- * head room, tail room, and ip header in the first frag size.
- */
- headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
- if (q->xdp_rxq_info)
- buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN;
- else
- buf_len = ionic_rx_buf_size(buf_info);
- frag_len = min_t(u16, len, buf_len);
-
desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom);
desc->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
@@ -844,16 +835,26 @@ void ionic_rx_fill(struct ionic_queue *q)
/* fill sg descriptors - buf[1..n] */
sg_elem = q->rxq_sgl[q->head_idx].elems;
for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++, sg_elem++) {
- if (!buf_info->page) { /* alloc a new sg buffer? */
- if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
- sg_elem->addr = 0;
- sg_elem->len = 0;
+ frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE);
+
+ /* Recycle any leftover buffers that are too small to reuse */
+ if (unlikely(buf_info->page && buf_info->len < frag_len))
+ ionic_rx_put_buf_direct(q, buf_info);
+
+ /* Get new buffer if needed */
+ if (!buf_info->page) {
+ buf_info->len = frag_len;
+ buf_info->page = page_pool_alloc(q->page_pool,
+ &buf_info->page_offset,
+ &buf_info->len,
+ GFP_ATOMIC);
+ if (unlikely(!buf_info->page)) {
+ buf_info->len = 0;
return;
}
}
sg_elem->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
- frag_len = min_t(u16, remain_len, ionic_rx_buf_size(buf_info));
sg_elem->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
@@ -883,17 +884,12 @@ void ionic_rx_fill(struct ionic_queue *q)
void ionic_rx_empty(struct ionic_queue *q)
{
struct ionic_rx_desc_info *desc_info;
- struct ionic_buf_info *buf_info;
unsigned int i, j;
for (i = 0; i < q->num_descs; i++) {
desc_info = &q->rx_info[i];
- for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++) {
- buf_info = &desc_info->bufs[j];
- if (buf_info->page)
- ionic_rx_page_free(q, buf_info);
- }
-
+ for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++)
+ ionic_rx_put_buf(q, &desc_info->bufs[j]);
desc_info->nbufs = 0;
}
@@ -974,6 +970,32 @@ static void ionic_xdp_do_flush(struct ionic_cq *cq)
}
}
+static unsigned int ionic_rx_cq_service(struct ionic_cq *cq,
+ unsigned int work_to_do)
+{
+ struct ionic_queue *q = cq->bound_q;
+ unsigned int work_done = 0;
+ struct bpf_prog *xdp_prog;
+
+ if (work_to_do == 0)
+ return 0;
+
+ xdp_prog = READ_ONCE(q->xdp_prog);
+ while (__ionic_rx_service(cq, xdp_prog)) {
+ if (cq->tail_idx == cq->num_descs - 1)
+ cq->done_color = !cq->done_color;
+
+ cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+
+ if (++work_done >= work_to_do)
+ break;
+ }
+ ionic_rx_fill(q, xdp_prog);
+ ionic_xdp_do_flush(cq);
+
+ return work_done;
+}
+
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
struct ionic_qcq *qcq = napi_to_qcq(napi);
@@ -984,12 +1006,8 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
if (unlikely(!budget))
return budget;
- work_done = ionic_cq_service(cq, budget,
- ionic_rx_service, NULL, NULL);
-
- ionic_rx_fill(cq->bound_q);
+ work_done = ionic_rx_cq_service(cq, budget);
- ionic_xdp_do_flush(cq);
if (work_done < budget && napi_complete_done(napi, work_done)) {
ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -1030,12 +1048,8 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
if (unlikely(!budget))
return budget;
- rx_work_done = ionic_cq_service(rxcq, budget,
- ionic_rx_service, NULL, NULL);
-
- ionic_rx_fill(rxcq->bound_q);
+ rx_work_done = ionic_rx_cq_service(rxcq, budget);
- ionic_xdp_do_flush(rxcq);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
ionic_dim_update(rxqcq, 0);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -1166,7 +1180,7 @@ static void ionic_tx_clean(struct ionic_queue *q,
struct sk_buff *skb;
if (desc_info->xdpf) {
- ionic_xdp_tx_desc_clean(q->partner, desc_info);
+ ionic_xdp_tx_desc_clean(q->partner, desc_info, in_napi);
stats->clean++;
if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q->index)))
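
The new ionic_rx_cq_service() above relies on the classic completion-queue
"color" convention: the consumer keeps a done_color bit that flips every
time the tail index wraps, and a completion entry is valid only while its
color bit matches. A minimal standalone sketch of that convention, assuming
a power-of-two ring size and a color bit in each entry (names here are
illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdint.h>

    struct cq_entry {
        bool color;              /* flipped by the producer each lap */
        /* ... completion payload ... */
    };

    struct cq {
        struct cq_entry *ring;
        uint16_t num_descs;      /* must be a power of two */
        uint16_t tail_idx;
        bool done_color;         /* color expected on the current lap */
    };

    /* true when the entry at tail_idx was written on this lap */
    static bool cq_entry_ready(const struct cq *cq)
    {
        return cq->ring[cq->tail_idx].color == cq->done_color;
    }

    static void cq_consume(struct cq *cq)
    {
        /* consuming the last slot: expect the opposite color next lap */
        if (cq->tail_idx == cq->num_descs - 1)
            cq->done_color = !cq->done_color;

        cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
    }
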
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
index 9e73e324e7a1..b2b9a2dc9eb8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -4,9 +4,11 @@
#ifndef _IONIC_TXRX_H_
#define _IONIC_TXRX_H_
+struct bpf_prog;
+
void ionic_tx_flush(struct ionic_cq *cq);
-void ionic_rx_fill(struct ionic_queue *q);
+void ionic_rx_fill(struct ionic_queue *q, struct bpf_prog *xdp_prog);
void ionic_rx_empty(struct ionic_queue *q);
void ionic_tx_empty(struct ionic_queue *q);
int ionic_rx_napi(struct napi_struct *napi, int budget);
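
The bare "struct bpf_prog;" added above is the usual forward-declaration
idiom: a header whose prototypes pass the type only by pointer does not
need the full definition, which keeps the heavy BPF includes out of the
driver header. The same pattern in miniature, with hypothetical names:

    /* widget.h is never included here; the type stays incomplete */
    struct widget;

    void widget_frob(struct widget *w);   /* pointer-only use is fine */
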
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index ed24d6af7487..9cff0a8ffb2c 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -3185,8 +3185,7 @@ netxen_list_config_ip(struct netxen_adapter *adapter,
struct list_head *head;
bool ret = false;
- dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
-
+ dev = ifa->ifa_dev->dev;
if (dev == NULL)
goto out;
@@ -3379,7 +3378,7 @@ netxen_inetaddr_event(struct notifier_block *this,
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
unsigned long ip_event;
- dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+ dev = ifa->ifa_dev->dev;
ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
recheck:
if (dev == NULL)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 63e3dac4d5f7..9d6399a5c780 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -326,25 +326,18 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *i
struct qede_ptp *ptp = edev->ptp;
if (!ptp) {
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (ptp->clock)
info->phc_index = ptp_clock_index(ptp->clock);
- else
- info->phc_index = -1;
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index b25102fded7b..3d0b5cd978cb 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1608,7 +1608,6 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *);
int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
-void qlcnic_watchdog_task(struct work_struct *work);
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
void qlcnic_set_multi(struct net_device *netdev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index bcef8ab715bf..d7cdea8f604d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2042,12 +2042,14 @@ int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode)
int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
{
- int err;
- u32 word;
struct qlcnic_cmd_args cmd;
- const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
- 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
- 0x255b0ec26d5a56daULL };
+ static const u64 key[] = {
+ 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL
+ };
+ u32 word;
+ int err;
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS);
if (err)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 23cd47d588e5..a55fe6ac06c7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -539,7 +539,6 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *);
void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
-int qlcnic_send_ctrl_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *, u32);
void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *);
void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *);
void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
@@ -577,8 +576,6 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
struct qlcnic_adapter *, u32);
void qlcnic_free_mbx_args(struct qlcnic_cmd_args *);
-void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
- struct qlcnic_info *);
int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *,
struct ethtool_coalesce *);
int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *);
@@ -590,7 +587,6 @@ irqreturn_t qlcnic_83xx_intr(int, void *);
irqreturn_t qlcnic_83xx_tmp_intr(int, void *);
void qlcnic_83xx_check_vf(struct qlcnic_adapter *,
const struct pci_device_id *);
-int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *);
void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *);
void qlcnic_83xx_register_map(struct qlcnic_hardware_context *);
@@ -602,8 +598,6 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *, u32, u32 *, int);
int qlcnic_83xx_flash_write32(struct qlcnic_adapter *, u32, u32 *);
int qlcnic_83xx_lock_flash(struct qlcnic_adapter *);
void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *);
-int qlcnic_83xx_save_flash_status(struct qlcnic_adapter *);
-int qlcnic_83xx_restore_flash_status(struct qlcnic_adapter *, int);
int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *);
int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *);
int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int);
@@ -616,13 +610,9 @@ void qlcnic_83xx_idc_exit(struct qlcnic_adapter *);
void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
-int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
-int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
- struct qlcnic_info *, u8);
-int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 90df4a0909fa..b3588a1ebc25 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -4146,7 +4146,7 @@ qlcnic_inetaddr_event(struct notifier_block *this,
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
- dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+ dev = ifa->ifa_dev->dev;
recheck:
if (dev == NULL)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index f1e40aade127..4f0ddcedfa97 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -286,7 +286,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
rmnet_dev->needs_free_netdev = true;
rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
- rmnet_dev->features |= NETIF_F_LLTX;
+ rmnet_dev->lltx = true;
/* This perm addr will be used as interface identifier by IPv6 */
rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
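
The rmnet hunk is part of the tree-wide conversion that retired the
NETIF_F_LLTX feature bit in favor of a private netdev flag: a driver that
serializes its own TX path now sets dev->lltx at setup time instead of
advertising a user-visible feature. A sketch of the pattern in a
hypothetical virtual driver's setup callback:

    static void foo_vnd_setup(struct net_device *dev)
    {
        /* was: dev->features |= NETIF_F_LLTX; */
        dev->lltx = true;    /* driver does its own TX locking */
    }
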
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 03015b665f4e..8a8ea51c639e 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -120,4 +120,23 @@ config R8169_LEDS
Optional support for controlling the NIC LEDs with the netdev
LED trigger.
+config RTASE
+ tristate "Realtek Automotive Switch 9054/9068/9072/9075/9068/9071 PCIe Interface support"
+ depends on PCI
+ select CRC32
+ select PAGE_POOL
+ help
+ Say Y here and it will be compiled and linked with the kernel
+ if you have a Realtek Ethernet adapter belonging to the
+ following families:
+ RTL9054 5GBit Ethernet
+ RTL9068 5GBit Ethernet
+ RTL9072 5GBit Ethernet
+ RTL9075 5GBit Ethernet
+ RTL9071 5GBit Ethernet
+
+ To compile this driver as a module, choose M here: the module
+ will be called rtase. This is recommended.
+
endif # NET_VENDOR_REALTEK
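
For a quick build test of the new driver, a plausible config fragment
(assuming the symbols introduced above) would be:

    CONFIG_PCI=y
    CONFIG_NET_VENDOR_REALTEK=y
    CONFIG_RTASE=m
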
diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile
index 635491d8826e..046adf503ff4 100644
--- a/drivers/net/ethernet/realtek/Makefile
+++ b/drivers/net/ethernet/realtek/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_ATP) += atp.o
r8169-y += r8169_main.o r8169_firmware.o r8169_phy_config.o
r8169-$(CONFIG_R8169_LEDS) += r8169_leds.o
obj-$(CONFIG_R8169) += r8169.o
+obj-$(CONFIG_RTASE) += rtase/
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 00882ffc7a02..e2db944e6fa8 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -69,6 +69,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_VER_63,
RTL_GIGA_MAC_VER_65,
+ RTL_GIGA_MAC_VER_66,
RTL_GIGA_MAC_NONE
};
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 3507c2e28110..0cc9baaecb1b 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -28,7 +28,7 @@
#include <linux/bitfield.h>
#include <linux/prefetch.h>
#include <linux/ipv6.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/ip6_checksum.h>
#include <net/netdev_queues.h>
@@ -56,6 +56,7 @@
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw"
+#define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw"
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -138,6 +139,7 @@ static const struct {
/* reserve 62 for CFG_METHOD_4 in the vendor driver */
[RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
[RTL_GIGA_MAC_VER_65] = {"RTL8126A", FIRMWARE_8126A_2},
+ [RTL_GIGA_MAC_VER_66] = {"RTL8126A", FIRMWARE_8126A_3},
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
@@ -576,7 +578,34 @@ struct rtl8169_counters {
__le64 rx_broadcast;
__le32 rx_multicast;
__le16 tx_aborted;
- __le16 tx_underun;
+ __le16 tx_underrun;
+ /* new since RTL8125 */
+ __le64 tx_octets;
+ __le64 rx_octets;
+ __le64 rx_multicast64;
+ __le64 tx_unicast64;
+ __le64 tx_broadcast64;
+ __le64 tx_multicast64;
+ __le32 tx_pause_on;
+ __le32 tx_pause_off;
+ __le32 tx_pause_all;
+ __le32 tx_deferred;
+ __le32 tx_late_collision;
+ __le32 tx_all_collision;
+ __le32 tx_aborted32;
+ __le32 align_errors32;
+ __le32 rx_frame_too_long;
+ __le32 rx_runt;
+ __le32 rx_pause_on;
+ __le32 rx_pause_off;
+ __le32 rx_pause_all;
+ __le32 rx_unknown_opcode;
+ __le32 rx_mac_error;
+ __le32 tx_underrun32;
+ __le32 rx_mac_missed;
+ __le32 rx_tcam_dropped;
+ __le32 tdu;
+ __le32 rdu;
};
struct rtl8169_tc_offsets {
@@ -679,6 +708,7 @@ MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
MODULE_FIRMWARE(FIRMWARE_8126A_2);
+MODULE_FIRMWARE(FIRMWARE_8126A_3);
static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
@@ -1201,7 +1231,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
r8168g_mdio_write(tp, location, val);
break;
default:
@@ -1216,7 +1246,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
return r8168g_mdio_read(tp, location);
default:
return r8169_mdio_read(tp, location);
@@ -1425,7 +1455,7 @@ static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_66:
if (enable)
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN);
else
@@ -1592,7 +1622,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_66:
if (wolopts)
rtl_mod_config2(tp, 0, PME_SIGNAL);
else
@@ -1841,7 +1871,7 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
data[9] = le64_to_cpu(counters->rx_broadcast);
data[10] = le32_to_cpu(counters->rx_multicast);
data[11] = le16_to_cpu(counters->tx_aborted);
- data[12] = le16_to_cpu(counters->tx_underun);
+ data[12] = le16_to_cpu(counters->tx_underrun);
}
static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2071,6 +2101,7 @@ static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_63:
case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_66:
tp->tx_lpi_timer = timer_val;
RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val);
break;
@@ -2199,6 +2230,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
enum mac_version ver;
} mac_info[] = {
/* 8126A family. */
+ { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_66 },
{ 0x7cf, 0x649, RTL_GIGA_MAC_VER_65 },
/* 8125B family. */
@@ -2470,6 +2502,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
break;
case RTL_GIGA_MAC_VER_63:
case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_66:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
RX_PAUSE_SLOT_ON);
break;
@@ -2656,7 +2689,7 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61:
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
- case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
@@ -2899,7 +2932,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
rtl_eri_set_bits(tp, 0xd4, 0x0c00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
break;
default:
@@ -2913,7 +2946,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
break;
default:
@@ -2940,6 +2973,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
rtl_mod_config5(tp, 0, ASPM_en);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_66:
val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -2950,7 +2984,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
/* reset ephy tx/rx disable timer */
r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
/* chip can trigger L1.2 */
@@ -2962,7 +2996,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
} else {
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
break;
default:
@@ -2971,6 +3005,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_66:
val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -3690,10 +3725,12 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
/* disable new tx descriptor format */
r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_66)
RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_66)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
else if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
@@ -3711,7 +3748,8 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_66)
r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000);
else
r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
@@ -3825,6 +3863,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
[RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a,
+ [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8126a,
};
if (hw_configs[tp->mac_version])
@@ -3845,6 +3884,7 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
break;
case RTL_GIGA_MAC_VER_63:
case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_66:
for (i = 0xa00; i < 0xa80; i += 4)
RTL_W32(tp, i, 0);
RTL_W16(tp, INT_CFG1_8125, 0x0000);
@@ -4073,7 +4113,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
rtl_enable_rxdvgate(tp);
fsleep(2000);
break;
@@ -4224,7 +4264,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
padto = max_t(unsigned int, padto, ETH_ZLEN);
break;
default:
@@ -5257,7 +5297,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
rtl_hw_init_8125(tp);
break;
default:
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index 1f74317beb88..cf29b1208482 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -1060,6 +1060,7 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
rtl8168g_enable_gphy_10m(phydev);
+ rtl8168g_disable_aldps(phydev);
rtl8125a_config_eee_phy(phydev);
}
@@ -1099,6 +1100,7 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000);
rtl8125_legacy_force_mode(phydev);
+ rtl8168g_disable_aldps(phydev);
rtl8125b_config_eee_phy(phydev);
}
@@ -1159,6 +1161,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
[RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config,
+ [RTL_GIGA_MAC_VER_66] = rtl8126a_hw_phy_config,
};
if (phy_configs[ver])
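
Nearly all of the r8169 churn above is mechanical: RTL_GIGA_MAC_VER_66 is
appended to every version-indexed table and every switch range. The tables
stay cheap to extend because C designated array initializers leave unlisted
slots NULL and let a new version be a one-line addition; a tiny standalone
sketch of the dispatch pattern:

    typedef void (*hw_cfg_fn)(void);

    enum ver { VER_A, VER_B, VER_C, VER_NONE };

    static void cfg_a(void)  { /* ... */ }
    static void cfg_bc(void) { /* ... */ }

    /* unlisted versions stay NULL, so the caller must check */
    static const hw_cfg_fn hw_cfgs[] = {
        [VER_A] = cfg_a,
        [VER_B] = cfg_bc,
        [VER_C] = cfg_bc,    /* new silicon reusing an old handler */
    };

    static void run_cfg(enum ver v)
    {
        if (v < VER_NONE && hw_cfgs[v])
            hw_cfgs[v]();
    }
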
diff --git a/drivers/net/ethernet/realtek/rtase/Makefile b/drivers/net/ethernet/realtek/rtase/Makefile
new file mode 100644
index 000000000000..ba3d8550f9e6
--- /dev/null
+++ b/drivers/net/ethernet/realtek/rtase/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# Copyright(c) 2024 Realtek Semiconductor Corp. All rights reserved.
+
+#
+# Makefile for the Realtek PCIe driver
+#
+
+obj-$(CONFIG_RTASE) += rtase.o
+
+rtase-objs := rtase_main.o
diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
new file mode 100644
index 000000000000..583c33930f88
--- /dev/null
+++ b/drivers/net/ethernet/realtek/rtase/rtase.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * rtase is the Linux device driver released for Realtek Automotive Switch
+ * controllers with PCI-Express interface.
+ *
+ * Copyright(c) 2024 Realtek Semiconductor Corp.
+ */
+
+#ifndef RTASE_H
+#define RTASE_H
+
+#define RTASE_HW_VER_MASK 0x7C800000
+
+#define RTASE_RX_DMA_BURST_256 4
+#define RTASE_TX_DMA_BURST_UNLIMITED 7
+
+#define RTASE_RX_BUF_SIZE (PAGE_SIZE - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define RTASE_MAX_JUMBO_SIZE (RTASE_RX_BUF_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN)
+
+/* 3 means InterFrameGap = the shortest one */
+#define RTASE_INTERFRAMEGAP 0x03
+
+#define RTASE_REGS_SIZE 256
+#define RTASE_PCI_REGS_SIZE 0x100
+
+#define RTASE_MULTICAST_FILTER_MASK GENMASK(30, 26)
+
+#define RTASE_VLAN_FILTER_ENTRY_NUM 32
+#define RTASE_NUM_TX_QUEUE 8
+#define RTASE_NUM_RX_QUEUE 4
+
+#define RTASE_TXQ_CTRL 1
+#define RTASE_FUNC_TXQ_NUM 1
+#define RTASE_FUNC_RXQ_NUM 1
+#define RTASE_INTERRUPT_NUM 1
+
+#define RTASE_MITI_TIME_COUNT_MASK GENMASK(3, 0)
+#define RTASE_MITI_TIME_UNIT_MASK GENMASK(7, 4)
+#define RTASE_MITI_DEFAULT_TIME 128
+#define RTASE_MITI_MAX_TIME 491520
+#define RTASE_MITI_PKT_NUM_COUNT_MASK GENMASK(11, 8)
+#define RTASE_MITI_PKT_NUM_UNIT_MASK GENMASK(13, 12)
+#define RTASE_MITI_DEFAULT_PKT_NUM 64
+#define RTASE_MITI_MAX_PKT_NUM_IDX 3
+#define RTASE_MITI_MAX_PKT_NUM_UNIT 16
+#define RTASE_MITI_MAX_PKT_NUM 240
+#define RTASE_MITI_COUNT_BIT_NUM 4
+
+#define RTASE_NUM_MSIX 4
+
+#define RTASE_DWORD_MOD 16
+
+/*****************************************************************************/
+enum rtase_registers {
+ RTASE_MAC0 = 0x0000,
+ RTASE_MAC4 = 0x0004,
+ RTASE_MAR0 = 0x0008,
+ RTASE_MAR1 = 0x000C,
+ RTASE_DTCCR0 = 0x0010,
+ RTASE_DTCCR4 = 0x0014,
+#define RTASE_COUNTER_RESET BIT(0)
+#define RTASE_COUNTER_DUMP BIT(3)
+
+ RTASE_FCR = 0x0018,
+#define RTASE_FCR_RXQ_MASK GENMASK(5, 4)
+
+ RTASE_LBK_CTRL = 0x001A,
+#define RTASE_LBK_ATLD BIT(1)
+#define RTASE_LBK_CLR BIT(0)
+
+ RTASE_TX_DESC_ADDR0 = 0x0020,
+ RTASE_TX_DESC_ADDR4 = 0x0024,
+ RTASE_TX_DESC_COMMAND = 0x0028,
+#define RTASE_TX_DESC_CMD_CS BIT(15)
+#define RTASE_TX_DESC_CMD_WE BIT(14)
+
+ RTASE_BOOT_CTL = 0x6004,
+ RTASE_CLKSW_SET = 0x6018,
+
+ RTASE_CHIP_CMD = 0x0037,
+#define RTASE_STOP_REQ BIT(7)
+#define RTASE_STOP_REQ_DONE BIT(6)
+#define RTASE_RE BIT(3)
+#define RTASE_TE BIT(2)
+
+ RTASE_IMR0 = 0x0038,
+ RTASE_ISR0 = 0x003C,
+#define RTASE_TOK7 BIT(30)
+#define RTASE_TOK6 BIT(28)
+#define RTASE_TOK5 BIT(26)
+#define RTASE_TOK4 BIT(24)
+#define RTASE_FOVW BIT(6)
+#define RTASE_RDU BIT(4)
+#define RTASE_TOK BIT(2)
+#define RTASE_ROK BIT(0)
+
+ RTASE_IMR1 = 0x0800,
+ RTASE_ISR1 = 0x0802,
+#define RTASE_Q_TOK BIT(4)
+#define RTASE_Q_RDU BIT(1)
+#define RTASE_Q_ROK BIT(0)
+
+ RTASE_EPHY_ISR = 0x6014,
+ RTASE_EPHY_IMR = 0x6016,
+
+ RTASE_TX_CONFIG_0 = 0x0040,
+#define RTASE_TX_INTER_FRAME_GAP_MASK GENMASK(25, 24)
+ /* DMA burst value (0-7) is shifted by this many bits */
+#define RTASE_TX_DMA_MASK GENMASK(10, 8)
+
+ RTASE_RX_CONFIG_0 = 0x0044,
+#define RTASE_RX_SINGLE_FETCH BIT(14)
+#define RTASE_RX_SINGLE_TAG BIT(13)
+#define RTASE_RX_MX_DMA_MASK GENMASK(10, 8)
+#define RTASE_ACPT_FLOW BIT(7)
+#define RTASE_ACCEPT_ERR BIT(5)
+#define RTASE_ACCEPT_RUNT BIT(4)
+#define RTASE_ACCEPT_BROADCAST BIT(3)
+#define RTASE_ACCEPT_MULTICAST BIT(2)
+#define RTASE_ACCEPT_MYPHYS BIT(1)
+#define RTASE_ACCEPT_ALLPHYS BIT(0)
+#define RTASE_ACCEPT_MASK (RTASE_ACPT_FLOW | RTASE_ACCEPT_ERR | \
+ RTASE_ACCEPT_RUNT | RTASE_ACCEPT_BROADCAST | \
+ RTASE_ACCEPT_MULTICAST | RTASE_ACCEPT_MYPHYS | \
+ RTASE_ACCEPT_ALLPHYS)
+
+ RTASE_RX_CONFIG_1 = 0x0046,
+#define RTASE_RX_MAX_FETCH_DESC_MASK GENMASK(15, 11)
+#define RTASE_RX_NEW_DESC_FORMAT_EN BIT(8)
+#define RTASE_OUTER_VLAN_DETAG_EN BIT(7)
+#define RTASE_INNER_VLAN_DETAG_EN BIT(6)
+#define RTASE_PCIE_NEW_FLOW BIT(2)
+#define RTASE_PCIE_RELOAD_EN BIT(0)
+
+ RTASE_EEM = 0x0050,
+#define RTASE_EEM_UNLOCK 0xC0
+
+ RTASE_TDFNR = 0x0057,
+ RTASE_TPPOLL = 0x0090,
+ RTASE_PDR = 0x00B0,
+ RTASE_FIFOR = 0x00D3,
+#define RTASE_TX_FIFO_EMPTY BIT(5)
+#define RTASE_RX_FIFO_EMPTY BIT(4)
+
+ RTASE_RMS = 0x00DA,
+ RTASE_CPLUS_CMD = 0x00E0,
+#define RTASE_FORCE_RXFLOW_EN BIT(11)
+#define RTASE_FORCE_TXFLOW_EN BIT(10)
+#define RTASE_RX_CHKSUM BIT(5)
+
+ RTASE_Q0_RX_DESC_ADDR0 = 0x00E4,
+ RTASE_Q0_RX_DESC_ADDR4 = 0x00E8,
+ RTASE_Q1_RX_DESC_ADDR0 = 0x4000,
+ RTASE_Q1_RX_DESC_ADDR4 = 0x4004,
+ RTASE_MTPS = 0x00EC,
+#define RTASE_TAG_NUM_SEL_MASK GENMASK(10, 8)
+
+ RTASE_MISC = 0x00F2,
+#define RTASE_RX_DV_GATE_EN BIT(3)
+
+ RTASE_TFUN_CTRL = 0x0400,
+#define RTASE_TX_NEW_DESC_FORMAT_EN BIT(0)
+
+ RTASE_TX_CONFIG_1 = 0x203E,
+#define RTASE_TC_MODE_MASK GENMASK(11, 10)
+
+ RTASE_TOKSEL = 0x2046,
+ RTASE_RFIFONFULL = 0x4406,
+ RTASE_INT_MITI_TX = 0x0A00,
+ RTASE_INT_MITI_RX = 0x0A80,
+
+ RTASE_VLAN_ENTRY_0 = 0xAC80,
+};
+
+enum rtase_desc_status_bit {
+ RTASE_DESC_OWN = BIT(31), /* Descriptor is owned by NIC */
+ RTASE_RING_END = BIT(30), /* End of descriptor ring */
+};
+
+enum rtase_sw_flag_content {
+ RTASE_SWF_MSI_ENABLED = BIT(1),
+ RTASE_SWF_MSIX_ENABLED = BIT(2),
+};
+
+#define RSVD_MASK 0x3FFFC000
+
+struct rtase_tx_desc {
+ __le32 opts1;
+ __le32 opts2;
+ __le64 addr;
+ __le32 opts3;
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 reserved3;
+} __packed;
+
+/*------ offset 0 of tx descriptor ------*/
+#define RTASE_TX_FIRST_FRAG BIT(29) /* Tx First segment of a packet */
+#define RTASE_TX_LAST_FRAG BIT(28) /* Tx Final segment of a packet */
+#define RTASE_GIANT_SEND_V4 BIT(26) /* TCP Giant Send Offload V4 (GSOv4) */
+#define RTASE_GIANT_SEND_V6 BIT(25) /* TCP Giant Send Offload V6 (GSOv6) */
+#define RTASE_TX_VLAN_TAG BIT(17) /* Add VLAN tag */
+
+/*------ offset 4 of tx descriptor ------*/
+#define RTASE_TX_UDPCS_C BIT(31) /* Calculate UDP/IP checksum */
+#define RTASE_TX_TCPCS_C BIT(30) /* Calculate TCP/IP checksum */
+#define RTASE_TX_IPCS_C BIT(29) /* Calculate IP checksum */
+#define RTASE_TX_IPV6F_C BIT(28) /* Indicate it is an IPv6 packet */
+
+union rtase_rx_desc {
+ struct {
+ __le64 header_buf_addr;
+ __le32 reserved1;
+ __le32 opts_header_len;
+ __le64 addr;
+ __le32 reserved2;
+ __le32 opts1;
+ } __packed desc_cmd;
+
+ struct {
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 rss;
+ __le32 opts4;
+ __le32 reserved3;
+ __le32 opts3;
+ __le32 opts2;
+ __le32 opts1;
+ } __packed desc_status;
+} __packed;
+
+/*------ offset 28 of rx descriptor ------*/
+#define RTASE_RX_FIRST_FRAG BIT(25) /* Rx First segment of a packet */
+#define RTASE_RX_LAST_FRAG BIT(24) /* Rx Final segment of a packet */
+#define RTASE_RX_RES BIT(20)
+#define RTASE_RX_RUNT BIT(19)
+#define RTASE_RX_RWT BIT(18)
+#define RTASE_RX_CRC BIT(16)
+#define RTASE_RX_V6F BIT(31)
+#define RTASE_RX_V4F BIT(30)
+#define RTASE_RX_UDPT BIT(29)
+#define RTASE_RX_TCPT BIT(28)
+#define RTASE_RX_IPF BIT(26) /* IP checksum failed */
+#define RTASE_RX_UDPF BIT(25) /* UDP/IP checksum failed */
+#define RTASE_RX_TCPF BIT(24) /* TCP/IP checksum failed */
+#define RTASE_RX_VLAN_TAG BIT(16) /* VLAN tag available */
+
+#define RTASE_NUM_DESC 1024
+#define RTASE_TX_BUDGET_DEFAULT 256
+#define RTASE_TX_RING_DESC_SIZE (RTASE_NUM_DESC * sizeof(struct rtase_tx_desc))
+#define RTASE_RX_RING_DESC_SIZE (RTASE_NUM_DESC * sizeof(union rtase_rx_desc))
+#define RTASE_TX_STOP_THRS (MAX_SKB_FRAGS + 1)
+#define RTASE_TX_START_THRS (2 * RTASE_TX_STOP_THRS)
+#define RTASE_VLAN_TAG_MASK GENMASK(15, 0)
+#define RTASE_RX_PKT_SIZE_MASK GENMASK(13, 0)
+
+#define RTASE_IVEC_NAME_SIZE (IFNAMSIZ + 10)
+
+struct rtase_int_vector {
+ struct rtase_private *tp;
+ unsigned int irq;
+ char name[RTASE_IVEC_NAME_SIZE];
+ u16 index;
+ u16 imr_addr;
+ u16 isr_addr;
+ u32 imr;
+ struct list_head ring_list;
+ struct napi_struct napi;
+ int (*poll)(struct napi_struct *napi, int budget);
+};
+
+struct rtase_ring {
+ struct rtase_int_vector *ivec;
+ void *desc;
+ dma_addr_t phy_addr;
+ u32 cur_idx;
+ u32 dirty_idx;
+ u16 index;
+
+ struct sk_buff *skbuff[RTASE_NUM_DESC];
+ void *data_buf[RTASE_NUM_DESC];
+ union {
+ u32 len[RTASE_NUM_DESC];
+ dma_addr_t data_phy_addr[RTASE_NUM_DESC];
+ } mis;
+
+ struct list_head ring_entry;
+ int (*ring_handler)(struct rtase_ring *ring, int budget);
+ u64 alloc_fail;
+};
+
+struct rtase_stats {
+ u64 tx_dropped;
+ u64 rx_dropped;
+ u64 multicast;
+ u64 rx_errors;
+ u64 rx_length_errors;
+ u64 rx_crc_errors;
+};
+
+struct rtase_private {
+ void __iomem *mmio_addr;
+ u32 sw_flag;
+
+ struct pci_dev *pdev;
+ struct net_device *dev;
+ u32 rx_buf_sz;
+
+ struct page_pool *page_pool;
+ struct rtase_ring tx_ring[RTASE_NUM_TX_QUEUE];
+ struct rtase_ring rx_ring[RTASE_NUM_RX_QUEUE];
+ struct rtase_counters *tally_vaddr;
+ dma_addr_t tally_paddr;
+
+ u32 vlan_filter_ctrl;
+ u16 vlan_filter_vid[RTASE_VLAN_FILTER_ENTRY_NUM];
+
+ struct msix_entry msix_entry[RTASE_NUM_MSIX];
+ struct rtase_int_vector int_vector[RTASE_NUM_MSIX];
+
+ struct rtase_stats stats;
+
+ u16 tx_queue_ctrl;
+ u16 func_tx_queue_num;
+ u16 func_rx_queue_num;
+ u16 int_nums;
+ u16 tx_int_mit;
+ u16 rx_int_mit;
+};
+
+#define RTASE_LSO_64K 64000
+
+#define RTASE_NIC_MAX_PHYS_BUF_COUNT_LSO2 (16 * 4)
+
+#define RTASE_TCPHO_MASK GENMASK(24, 18)
+
+#define RTASE_MSS_MASK GENMASK(28, 18)
+
+#endif /* RTASE_H */
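
The bit definitions above are everything the TX fill path needs:
first/last-fragment markers, offload bits, and the shared OWN/RING_END
bits. A minimal sketch of how a single-fragment, no-offload transmit
descriptor could be composed, assuming rtase.h and the usual kernel
headers are in scope and that the frame length occupies the low bits of
opts1 (an assumption for illustration; the authoritative encoding lives
in rtase_main.c):

    /* sketch only: single-fragment TX descriptor, no offloads */
    static void sketch_fill_tx_desc(struct rtase_tx_desc *desc,
                                    dma_addr_t addr, u32 len, bool last_slot)
    {
        u32 opts1 = RTASE_TX_FIRST_FRAG | RTASE_TX_LAST_FRAG | len;

        if (last_slot)
            opts1 |= RTASE_RING_END;    /* wrap marker on the final slot */

        desc->addr = cpu_to_le64(addr);
        desc->opts2 = 0;

        /* publish the payload fields before handing ownership to the NIC */
        dma_wmb();
        desc->opts1 = cpu_to_le32(opts1 | RTASE_DESC_OWN);
    }
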
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
new file mode 100644
index 000000000000..f8777b7663d3
--- /dev/null
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -0,0 +1,2288 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * rtase is the Linux device driver released for Realtek Automotive Switch
+ * controllers with PCI-Express interface.
+ *
+ * Copyright(c) 2024 Realtek Semiconductor Corp.
+ *
+ * Below is a simplified block diagram of the chip and its relevant interfaces.
+ *
+ * *************************
+ * * *
+ * * CPU network device *
+ * * *
+ * * +-------------+ *
+ * * | PCIE Host | *
+ * ***********++************
+ * ||
+ * PCIE
+ * ||
+ * ********************++**********************
+ * * | PCIE Endpoint | *
+ * * +---------------+ *
+ * * | GMAC | *
+ * * +--++--+ Realtek *
+ * * || RTL90xx Series *
+ * * || *
+ * * +-------------++----------------+ *
+ * * | | MAC | | *
+ * * | +-----+ | *
+ * * | | *
+ * * | Ethernet Switch Core | *
+ * * | | *
+ * * | +-----+ +-----+ | *
+ * * | | MAC |...........| MAC | | *
+ * * +---+-----+-----------+-----+---+ *
+ * * | PHY |...........| PHY | *
+ * * +--++-+ +--++-+ *
+ * *************||****************||***********
+ *
+ * The Realtek RTL90xx block above is the entire chip: the GMAC connects
+ * directly to the switch core, with no PHY in between. This driver
+ * controls only the GMAC, not the switch core, so it is not a DSA
+ * driver; Linux simply acts as an ordinary leaf node in this model.
+ */
+
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/prefetch.h>
+#include <linux/rtnetlink.h>
+#include <linux/tcp.h>
+#include <asm/irq.h>
+#include <net/ip6_checksum.h>
+#include <net/netdev_queues.h>
+#include <net/page_pool/helpers.h>
+#include <net/pkt_cls.h>
+
+#include "rtase.h"
+
+#define RTK_OPTS1_DEBUG_VALUE 0x0BADBEEF
+#define RTK_MAGIC_NUMBER 0x0BADBADBADBADBAD
+
+static const struct pci_device_id rtase_pci_tbl[] = {
+ {PCI_VDEVICE(REALTEK, 0x906A)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, rtase_pci_tbl);
+
+MODULE_AUTHOR("Realtek ARD Software Team");
+MODULE_DESCRIPTION("Network Driver for the PCIe interface of Realtek Automotive Ethernet Switch");
+MODULE_LICENSE("Dual BSD/GPL");
+
+struct rtase_counters {
+ __le64 tx_packets;
+ __le64 rx_packets;
+ __le64 tx_errors;
+ __le32 rx_errors;
+ __le16 rx_missed;
+ __le16 align_errors;
+ __le32 tx_one_collision;
+ __le32 tx_multi_collision;
+ __le64 rx_unicast;
+ __le64 rx_broadcast;
+ __le32 rx_multicast;
+ __le16 tx_aborted;
+ __le16 tx_underrun;
+} __packed;
+
+static void rtase_w8(const struct rtase_private *tp, u16 reg, u8 val8)
+{
+ writeb(val8, tp->mmio_addr + reg);
+}
+
+static void rtase_w16(const struct rtase_private *tp, u16 reg, u16 val16)
+{
+ writew(val16, tp->mmio_addr + reg);
+}
+
+static void rtase_w32(const struct rtase_private *tp, u16 reg, u32 val32)
+{
+ writel(val32, tp->mmio_addr + reg);
+}
+
+static u8 rtase_r8(const struct rtase_private *tp, u16 reg)
+{
+ return readb(tp->mmio_addr + reg);
+}
+
+static u16 rtase_r16(const struct rtase_private *tp, u16 reg)
+{
+ return readw(tp->mmio_addr + reg);
+}
+
+static u32 rtase_r32(const struct rtase_private *tp, u16 reg)
+{
+ return readl(tp->mmio_addr + reg);
+}
+
+static void rtase_free_desc(struct rtase_private *tp)
+{
+ struct pci_dev *pdev = tp->pdev;
+ u32 i;
+
+ for (i = 0; i < tp->func_tx_queue_num; i++) {
+ if (!tp->tx_ring[i].desc)
+ continue;
+
+ dma_free_coherent(&pdev->dev, RTASE_TX_RING_DESC_SIZE,
+ tp->tx_ring[i].desc,
+ tp->tx_ring[i].phy_addr);
+ tp->tx_ring[i].desc = NULL;
+ }
+
+ for (i = 0; i < tp->func_rx_queue_num; i++) {
+ if (!tp->rx_ring[i].desc)
+ continue;
+
+ dma_free_coherent(&pdev->dev, RTASE_RX_RING_DESC_SIZE,
+ tp->rx_ring[i].desc,
+ tp->rx_ring[i].phy_addr);
+ tp->rx_ring[i].desc = NULL;
+ }
+}
+
+static int rtase_alloc_desc(struct rtase_private *tp)
+{
+ struct pci_dev *pdev = tp->pdev;
+ u32 i;
+
+ /* rx and tx descriptors need 256-byte alignment.
+ * dma_alloc_coherent provides more than that.
+ */
+ for (i = 0; i < tp->func_tx_queue_num; i++) {
+ tp->tx_ring[i].desc =
+ dma_alloc_coherent(&pdev->dev,
+ RTASE_TX_RING_DESC_SIZE,
+ &tp->tx_ring[i].phy_addr,
+ GFP_KERNEL);
+ if (!tp->tx_ring[i].desc)
+ goto err_out;
+ }
+
+ for (i = 0; i < tp->func_rx_queue_num; i++) {
+ tp->rx_ring[i].desc =
+ dma_alloc_coherent(&pdev->dev,
+ RTASE_RX_RING_DESC_SIZE,
+ &tp->rx_ring[i].phy_addr,
+ GFP_KERNEL);
+ if (!tp->rx_ring[i].desc)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ rtase_free_desc(tp);
+ return -ENOMEM;
+}
+
+static void rtase_unmap_tx_skb(struct pci_dev *pdev, u32 len,
+ struct rtase_tx_desc *desc)
+{
+ dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len,
+ DMA_TO_DEVICE);
+ desc->opts1 = cpu_to_le32(RTK_OPTS1_DEBUG_VALUE);
+ desc->opts2 = 0x00;
+ desc->addr = cpu_to_le64(RTK_MAGIC_NUMBER);
+}
+
+static void rtase_tx_clear_range(struct rtase_ring *ring, u32 start, u32 n)
+{
+ struct rtase_tx_desc *desc_base = ring->desc;
+ struct rtase_private *tp = ring->ivec->tp;
+ u32 i;
+
+ for (i = 0; i < n; i++) {
+ u32 entry = (start + i) % RTASE_NUM_DESC;
+ struct rtase_tx_desc *desc = desc_base + entry;
+ u32 len = ring->mis.len[entry];
+ struct sk_buff *skb;
+
+ if (len == 0)
+ continue;
+
+ rtase_unmap_tx_skb(tp->pdev, len, desc);
+ ring->mis.len[entry] = 0;
+ skb = ring->skbuff[entry];
+ if (!skb)
+ continue;
+
+ tp->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ ring->skbuff[entry] = NULL;
+ }
+}
+
+static void rtase_tx_clear(struct rtase_private *tp)
+{
+ struct rtase_ring *ring;
+ u16 i;
+
+ for (i = 0; i < tp->func_tx_queue_num; i++) {
+ ring = &tp->tx_ring[i];
+ rtase_tx_clear_range(ring, ring->dirty_idx, RTASE_NUM_DESC);
+ ring->cur_idx = 0;
+ ring->dirty_idx = 0;
+ }
+}
+
+static void rtase_mark_to_asic(union rtase_rx_desc *desc, u32 rx_buf_sz)
+{
+ u32 eor = le32_to_cpu(desc->desc_cmd.opts1) & RTASE_RING_END;
+
+ desc->desc_status.opts2 = 0;
+ /* force memory writes to complete before releasing descriptor */
+ dma_wmb();
+ WRITE_ONCE(desc->desc_cmd.opts1,
+ cpu_to_le32(RTASE_DESC_OWN | eor | rx_buf_sz));
+}
+
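+/* free TX slots: cur_idx (producer) and dirty_idx (consumer) are
+ * free-running u32 counters, so unsigned wraparound keeps this correct
+ */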
+static u32 rtase_tx_avail(struct rtase_ring *ring)
+{
+ return READ_ONCE(ring->dirty_idx) + RTASE_NUM_DESC -
+ READ_ONCE(ring->cur_idx);
+}
+
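+/* reclaim descriptors the NIC has released and free their skbs; always
+ * returns 0 so TX completion work does not consume the NAPI budget that
+ * rtase_poll() shares across the rings of this vector
+ */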
+static int tx_handler(struct rtase_ring *ring, int budget)
+{
+ const struct rtase_private *tp = ring->ivec->tp;
+ struct net_device *dev = tp->dev;
+ u32 dirty_tx, tx_left;
+ u32 bytes_compl = 0;
+ u32 pkts_compl = 0;
+ int workdone = 0;
+
+ dirty_tx = ring->dirty_idx;
+ tx_left = READ_ONCE(ring->cur_idx) - dirty_tx;
+
+ while (tx_left > 0) {
+ u32 entry = dirty_tx % RTASE_NUM_DESC;
+ struct rtase_tx_desc *desc = ring->desc +
+ sizeof(struct rtase_tx_desc) * entry;
+ u32 status;
+
+ status = le32_to_cpu(desc->opts1);
+
+ if (status & RTASE_DESC_OWN)
+ break;
+
+ rtase_unmap_tx_skb(tp->pdev, ring->mis.len[entry], desc);
+ ring->mis.len[entry] = 0;
+ if (ring->skbuff[entry]) {
+ pkts_compl++;
+ bytes_compl += ring->skbuff[entry]->len;
+ napi_consume_skb(ring->skbuff[entry], budget);
+ ring->skbuff[entry] = NULL;
+ }
+
+ dirty_tx++;
+ tx_left--;
+ workdone++;
+
+ if (workdone == RTASE_TX_BUDGET_DEFAULT)
+ break;
+ }
+
+ if (ring->dirty_idx != dirty_tx) {
+ dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl);
+ WRITE_ONCE(ring->dirty_idx, dirty_tx);
+
+ netif_subqueue_completed_wake(dev, ring->index, pkts_compl,
+ bytes_compl,
+ rtase_tx_avail(ring),
+ RTASE_TX_START_THRS);
+
+ if (ring->cur_idx != dirty_tx)
+ rtase_w8(tp, RTASE_TPPOLL, BIT(ring->index));
+ }
+
+ return 0;
+}
+
+static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx)
+{
+ struct rtase_ring *ring = &tp->tx_ring[idx];
+ struct rtase_tx_desc *desc;
+ u32 i;
+
+ memset(ring->desc, 0x0, RTASE_TX_RING_DESC_SIZE);
+ memset(ring->skbuff, 0x0, sizeof(ring->skbuff));
+ ring->cur_idx = 0;
+ ring->dirty_idx = 0;
+ ring->index = idx;
+ ring->alloc_fail = 0;
+
+ for (i = 0; i < RTASE_NUM_DESC; i++) {
+ ring->mis.len[i] = 0;
+ if ((RTASE_NUM_DESC - 1) == i) {
+ desc = ring->desc + sizeof(struct rtase_tx_desc) * i;
+ desc->opts1 = cpu_to_le32(RTASE_RING_END);
+ }
+ }
+
+ ring->ring_handler = tx_handler;
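+ /* TX rings 0-3 get a dedicated MSI-X vector; rings 4-7 share vector 0 */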
+ if (idx < 4) {
+ ring->ivec = &tp->int_vector[idx];
+ list_add_tail(&ring->ring_entry,
+ &tp->int_vector[idx].ring_list);
+ } else {
+ ring->ivec = &tp->int_vector[0];
+ list_add_tail(&ring->ring_entry, &tp->int_vector[0].ring_list);
+ }
+}
+
+static void rtase_map_to_asic(union rtase_rx_desc *desc, dma_addr_t mapping,
+ u32 rx_buf_sz)
+{
+ desc->desc_cmd.addr = cpu_to_le64(mapping);
+
+ rtase_mark_to_asic(desc, rx_buf_sz);
+}
+
+static void rtase_make_unusable_by_asic(union rtase_rx_desc *desc)
+{
+ desc->desc_cmd.addr = cpu_to_le64(RTK_MAGIC_NUMBER);
+ desc->desc_cmd.opts1 &= ~cpu_to_le32(RTASE_DESC_OWN | RSVD_MASK);
+}
+
+static int rtase_alloc_rx_data_buf(struct rtase_ring *ring,
+ void **p_data_buf,
+ union rtase_rx_desc *desc,
+ dma_addr_t *rx_phy_addr)
+{
+ struct rtase_int_vector *ivec = ring->ivec;
+ const struct rtase_private *tp = ivec->tp;
+ dma_addr_t mapping;
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(tp->page_pool);
+ if (!page) {
+ ring->alloc_fail++;
+ goto err_out;
+ }
+
+ *p_data_buf = page_address(page);
+ mapping = page_pool_get_dma_addr(page);
+ *rx_phy_addr = mapping;
+ rtase_map_to_asic(desc, mapping, tp->rx_buf_sz);
+
+ return 0;
+
+err_out:
+ rtase_make_unusable_by_asic(desc);
+
+ return -ENOMEM;
+}
+
+static u32 rtase_rx_ring_fill(struct rtase_ring *ring, u32 ring_start,
+ u32 ring_end)
+{
+ union rtase_rx_desc *desc_base = ring->desc;
+ u32 cur;
+
+ for (cur = ring_start; ring_end - cur > 0; cur++) {
+ u32 i = cur % RTASE_NUM_DESC;
+ union rtase_rx_desc *desc = desc_base + i;
+ int ret;
+
+ if (ring->data_buf[i])
+ continue;
+
+ ret = rtase_alloc_rx_data_buf(ring, &ring->data_buf[i], desc,
+ &ring->mis.data_phy_addr[i]);
+ if (ret)
+ break;
+ }
+
+ return cur - ring_start;
+}
+
+static void rtase_mark_as_last_descriptor(union rtase_rx_desc *desc)
+{
+ desc->desc_cmd.opts1 |= cpu_to_le32(RTASE_RING_END);
+}
+
+static void rtase_rx_ring_clear(struct page_pool *page_pool,
+ struct rtase_ring *ring)
+{
+ union rtase_rx_desc *desc;
+ struct page *page;
+ u32 i;
+
+ for (i = 0; i < RTASE_NUM_DESC; i++) {
+ desc = ring->desc + sizeof(union rtase_rx_desc) * i;
+ if (ring->data_buf[i]) {
+ page = virt_to_head_page(ring->data_buf[i]);
+ page_pool_put_full_page(page_pool, page, true);
+ }
+
+ rtase_make_unusable_by_asic(desc);
+ }
+}
+
+static int rtase_fragmented_frame(u32 status)
+{
+ return (status & (RTASE_RX_FIRST_FRAG | RTASE_RX_LAST_FRAG)) !=
+ (RTASE_RX_FIRST_FRAG | RTASE_RX_LAST_FRAG);
+}
+
+static void rtase_rx_csum(const struct rtase_private *tp, struct sk_buff *skb,
+ const union rtase_rx_desc *desc)
+{
+ u32 opts2 = le32_to_cpu(desc->desc_status.opts2);
+
+ /* rx csum offload */
+ if (((opts2 & RTASE_RX_V4F) && !(opts2 & RTASE_RX_IPF)) ||
+ (opts2 & RTASE_RX_V6F)) {
+ if (((opts2 & RTASE_RX_TCPT) && !(opts2 & RTASE_RX_TCPF)) ||
+ ((opts2 & RTASE_RX_UDPT) && !(opts2 & RTASE_RX_UDPF)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+}
+
+static void rtase_rx_vlan_skb(union rtase_rx_desc *desc, struct sk_buff *skb)
+{
+ u32 opts2 = le32_to_cpu(desc->desc_status.opts2);
+
+ if (!(opts2 & RTASE_RX_VLAN_TAG))
+ return;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ swab16(opts2 & RTASE_VLAN_TAG_MASK));
+}
+
+static void rtase_rx_skb(const struct rtase_ring *ring, struct sk_buff *skb)
+{
+ struct rtase_int_vector *ivec = ring->ivec;
+
+ napi_gro_receive(&ivec->napi, skb);
+}
+
+static int rx_handler(struct rtase_ring *ring, int budget)
+{
+ union rtase_rx_desc *desc_base = ring->desc;
+ u32 pkt_size, cur_rx, delta, entry, status;
+ struct rtase_private *tp = ring->ivec->tp;
+ struct net_device *dev = tp->dev;
+ union rtase_rx_desc *desc;
+ struct sk_buff *skb;
+ int workdone = 0;
+
+ cur_rx = ring->cur_idx;
+ entry = cur_rx % RTASE_NUM_DESC;
+ desc = &desc_base[entry];
+
+ while (workdone < budget) {
+ status = le32_to_cpu(desc->desc_status.opts1);
+
+ if (status & RTASE_DESC_OWN)
+ break;
+
+ /* This barrier is needed to keep us from reading
+ * any other fields out of the rx descriptor until
+ * we know the status of RTASE_DESC_OWN
+ */
+ dma_rmb();
+
+ if (unlikely(status & RTASE_RX_RES)) {
+ if (net_ratelimit())
+ netdev_warn(dev, "Rx ERROR. status = %08x\n",
+ status);
+
+ tp->stats.rx_errors++;
+
+ if (status & (RTASE_RX_RWT | RTASE_RX_RUNT))
+ tp->stats.rx_length_errors++;
+
+ if (status & RTASE_RX_CRC)
+ tp->stats.rx_crc_errors++;
+
+ if (dev->features & NETIF_F_RXALL)
+ goto process_pkt;
+
+ rtase_mark_to_asic(desc, tp->rx_buf_sz);
+ goto skip_process_pkt;
+ }
+
+process_pkt:
+ pkt_size = status & RTASE_RX_PKT_SIZE_MASK;
+ if (likely(!(dev->features & NETIF_F_RXFCS)))
+ pkt_size -= ETH_FCS_LEN;
+
+ /* The driver does not support incoming fragmented frames.
+ * They are treated as a symptom of over-MTU-sized frames.
+ */
+ if (unlikely(rtase_fragmented_frame(status))) {
+ tp->stats.rx_dropped++;
+ tp->stats.rx_length_errors++;
+ rtase_mark_to_asic(desc, tp->rx_buf_sz);
+ goto skip_process_pkt;
+ }
+
+ dma_sync_single_for_cpu(&tp->pdev->dev,
+ ring->mis.data_phy_addr[entry],
+ tp->rx_buf_sz, DMA_FROM_DEVICE);
+
+ skb = build_skb(ring->data_buf[entry], PAGE_SIZE);
+ if (!skb) {
+ tp->stats.rx_dropped++;
+ rtase_mark_to_asic(desc, tp->rx_buf_sz);
+ goto skip_process_pkt;
+ }
+ ring->data_buf[entry] = NULL;
+
+ if (dev->features & NETIF_F_RXCSUM)
+ rtase_rx_csum(tp, skb, desc);
+
+ skb_put(skb, pkt_size);
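+ /* pages belong to the page_pool; recycle them when the skb is freed */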
+ skb_mark_for_recycle(skb);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (skb->pkt_type == PACKET_MULTICAST)
+ tp->stats.multicast++;
+
+ rtase_rx_vlan_skb(desc, skb);
+ rtase_rx_skb(ring, skb);
+
+ dev_sw_netstats_rx_add(dev, pkt_size);
+
+skip_process_pkt:
+ workdone++;
+ cur_rx++;
+ entry = cur_rx % RTASE_NUM_DESC;
+ desc = ring->desc + sizeof(union rtase_rx_desc) * entry;
+ }
+
+ ring->cur_idx = cur_rx;
+ delta = rtase_rx_ring_fill(ring, ring->dirty_idx, ring->cur_idx);
+ ring->dirty_idx += delta;
+
+ return workdone;
+}
+
+static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx)
+{
+ struct rtase_ring *ring = &tp->rx_ring[idx];
+ u16 i;
+
+ memset(ring->desc, 0x0, RTASE_RX_RING_DESC_SIZE);
+ memset(ring->data_buf, 0x0, sizeof(ring->data_buf));
+ ring->cur_idx = 0;
+ ring->dirty_idx = 0;
+ ring->index = idx;
+ ring->alloc_fail = 0;
+
+ for (i = 0; i < RTASE_NUM_DESC; i++)
+ ring->mis.data_phy_addr[i] = 0;
+
+ ring->ring_handler = rx_handler;
+ ring->ivec = &tp->int_vector[idx];
+ list_add_tail(&ring->ring_entry, &tp->int_vector[idx].ring_list);
+}
+
+static void rtase_rx_clear(struct rtase_private *tp)
+{
+ u32 i;
+
+ for (i = 0; i < tp->func_rx_queue_num; i++)
+ rtase_rx_ring_clear(tp->page_pool, &tp->rx_ring[i]);
+
+ page_pool_destroy(tp->page_pool);
+ tp->page_pool = NULL;
+}
+
+static int rtase_init_ring(const struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ struct page_pool_params pp_params = { 0 };
+ struct page_pool *page_pool;
+ u32 num;
+ u16 i;
+
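+ /* the pool maps pages for DMA and syncs them for the device on recycle;
+ * the rx path then only needs dma_sync_single_for_cpu() before build_skb()
+ */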
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.order = 0;
+ pp_params.pool_size = RTASE_NUM_DESC * tp->func_rx_queue_num;
+ pp_params.nid = dev_to_node(&tp->pdev->dev);
+ pp_params.dev = &tp->pdev->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.max_len = PAGE_SIZE;
+ pp_params.offset = 0;
+
+ page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(page_pool)) {
+ netdev_err(tp->dev, "failed to create page pool\n");
+ return -ENOMEM;
+ }
+
+ tp->page_pool = page_pool;
+
+ for (i = 0; i < tp->func_tx_queue_num; i++)
+ rtase_tx_desc_init(tp, i);
+
+ for (i = 0; i < tp->func_rx_queue_num; i++) {
+ rtase_rx_desc_init(tp, i);
+
+ num = rtase_rx_ring_fill(&tp->rx_ring[i], 0, RTASE_NUM_DESC);
+ if (num != RTASE_NUM_DESC)
+ goto err_out;
+
+ rtase_mark_as_last_descriptor(tp->rx_ring[i].desc +
+ sizeof(union rtase_rx_desc) *
+ (RTASE_NUM_DESC - 1));
+ }
+
+ return 0;
+
+err_out:
+ rtase_rx_clear(tp);
+ return -ENOMEM;
+}
+
+static void rtase_interrupt_mitigation(const struct rtase_private *tp)
+{
+ u32 i;
+
+ for (i = 0; i < tp->func_tx_queue_num; i++)
+ rtase_w16(tp, RTASE_INT_MITI_TX + i * 2, tp->tx_int_mit);
+
+ for (i = 0; i < tp->func_rx_queue_num; i++)
+ rtase_w16(tp, RTASE_INT_MITI_RX + i * 2, tp->rx_int_mit);
+}
+
+static void rtase_tally_counter_addr_fill(const struct rtase_private *tp)
+{
+ rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(tp->tally_paddr));
+ rtase_w32(tp, RTASE_DTCCR0, lower_32_bits(tp->tally_paddr));
+}
+
+static void rtase_tally_counter_clear(const struct rtase_private *tp)
+{
+ u32 cmd = lower_32_bits(tp->tally_paddr);
+
+ rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(tp->tally_paddr));
+ rtase_w32(tp, RTASE_DTCCR0, cmd | RTASE_COUNTER_RESET);
+}
+
+static void rtase_desc_addr_fill(const struct rtase_private *tp)
+{
+ const struct rtase_ring *ring;
+ u16 i, cmd, val;
+ int err;
+
+ for (i = 0; i < tp->func_tx_queue_num; i++) {
+ ring = &tp->tx_ring[i];
+
+ rtase_w32(tp, RTASE_TX_DESC_ADDR0,
+ lower_32_bits(ring->phy_addr));
+ rtase_w32(tp, RTASE_TX_DESC_ADDR4,
+ upper_32_bits(ring->phy_addr));
+
+ cmd = i | RTASE_TX_DESC_CMD_WE | RTASE_TX_DESC_CMD_CS;
+ rtase_w16(tp, RTASE_TX_DESC_COMMAND, cmd);
+
+ err = read_poll_timeout(rtase_r16, val,
+ !(val & RTASE_TX_DESC_CMD_CS), 10,
+ 1000, false, tp,
+ RTASE_TX_DESC_COMMAND);
+
+ if (err == -ETIMEDOUT)
+ netdev_err(tp->dev,
+ "error occurred in fill tx descriptor\n");
+ }
+
+ for (i = 0; i < tp->func_rx_queue_num; i++) {
+ ring = &tp->rx_ring[i];
+
+ if (i == 0) {
+ rtase_w32(tp, RTASE_Q0_RX_DESC_ADDR0,
+ lower_32_bits(ring->phy_addr));
+ rtase_w32(tp, RTASE_Q0_RX_DESC_ADDR4,
+ upper_32_bits(ring->phy_addr));
+ } else {
+ rtase_w32(tp, (RTASE_Q1_RX_DESC_ADDR0 + ((i - 1) * 8)),
+ lower_32_bits(ring->phy_addr));
+ rtase_w32(tp, (RTASE_Q1_RX_DESC_ADDR4 + ((i - 1) * 8)),
+ upper_32_bits(ring->phy_addr));
+ }
+ }
+}
+
+static void rtase_hw_set_features(const struct net_device *dev,
+ netdev_features_t features)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u16 rx_config, val;
+
+ rx_config = rtase_r16(tp, RTASE_RX_CONFIG_0);
+ if (features & NETIF_F_RXALL)
+ rx_config |= (RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT);
+ else
+ rx_config &= ~(RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT);
+
+ rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config);
+
+ val = rtase_r16(tp, RTASE_CPLUS_CMD);
+ if (features & NETIF_F_RXCSUM)
+ rtase_w16(tp, RTASE_CPLUS_CMD, val | RTASE_RX_CHKSUM);
+ else
+ rtase_w16(tp, RTASE_CPLUS_CMD, val & ~RTASE_RX_CHKSUM);
+
+ rx_config = rtase_r16(tp, RTASE_RX_CONFIG_1);
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ rx_config |= (RTASE_INNER_VLAN_DETAG_EN |
+ RTASE_OUTER_VLAN_DETAG_EN);
+ else
+ rx_config &= ~(RTASE_INNER_VLAN_DETAG_EN |
+ RTASE_OUTER_VLAN_DETAG_EN);
+
+ rtase_w16(tp, RTASE_RX_CONFIG_1, rx_config);
+}
+
+static void rtase_hw_set_rx_packet_filter(struct net_device *dev)
+{
+ u32 mc_filter[2] = { 0xFFFFFFFF, 0xFFFFFFFF };
+ struct rtase_private *tp = netdev_priv(dev);
+ u16 rx_mode;
+
+ rx_mode = rtase_r16(tp, RTASE_RX_CONFIG_0) & ~RTASE_ACCEPT_MASK;
+ rx_mode |= RTASE_ACCEPT_BROADCAST | RTASE_ACCEPT_MYPHYS;
+
+ if (dev->flags & IFF_PROMISC) {
+ rx_mode |= RTASE_ACCEPT_MULTICAST | RTASE_ACCEPT_ALLPHYS;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ rx_mode |= RTASE_ACCEPT_MULTICAST;
+ } else {
+ struct netdev_hw_addr *hw_addr;
+
+ mc_filter[0] = 0;
+ mc_filter[1] = 0;
+
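+ /* bit 31 of the address CRC selects the filter word, bits 30:26
+ * select the bit within that word
+ */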
+ netdev_for_each_mc_addr(hw_addr, dev) {
+ u32 bit_nr = eth_hw_addr_crc(hw_addr);
+ u32 idx = u32_get_bits(bit_nr, BIT(31));
+ u32 bit = u32_get_bits(bit_nr,
+ RTASE_MULTICAST_FILTER_MASK);
+
+ mc_filter[idx] |= BIT(bit);
+ rx_mode |= RTASE_ACCEPT_MULTICAST;
+ }
+ }
+
+ if (dev->features & NETIF_F_RXALL)
+ rx_mode |= RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT;
+
+ rtase_w32(tp, RTASE_MAR0, swab32(mc_filter[1]));
+ rtase_w32(tp, RTASE_MAR1, swab32(mc_filter[0]));
+ rtase_w16(tp, RTASE_RX_CONFIG_0, rx_mode);
+}
+
+static void rtase_irq_dis_and_clear(const struct rtase_private *tp)
+{
+ const struct rtase_int_vector *ivec = &tp->int_vector[0];
+ u32 val1;
+ u16 val2;
+ u8 i;
+
+ rtase_w32(tp, ivec->imr_addr, 0);
+ val1 = rtase_r32(tp, ivec->isr_addr);
+ rtase_w32(tp, ivec->isr_addr, val1);
+
+ for (i = 1; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ rtase_w16(tp, ivec->imr_addr, 0);
+ val2 = rtase_r16(tp, ivec->isr_addr);
+ rtase_w16(tp, ivec->isr_addr, val2);
+ }
+}
+
+static void rtase_poll_timeout(const struct rtase_private *tp, u32 cond,
+ u32 sleep_us, u64 timeout_us, u16 reg)
+{
+ int err;
+ u8 val;
+
+ err = read_poll_timeout(rtase_r8, val, val & cond, sleep_us,
+ timeout_us, false, tp, reg);
+
+ if (err == -ETIMEDOUT)
+ netdev_err(tp->dev, "poll reg 0x00%x timeout\n", reg);
+}
+
+static void rtase_nic_reset(const struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u16 rx_config;
+ u8 val;
+
+ rx_config = rtase_r16(tp, RTASE_RX_CONFIG_0);
+ rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config & ~RTASE_ACCEPT_MASK);
+
+ val = rtase_r8(tp, RTASE_MISC);
+ rtase_w8(tp, RTASE_MISC, val | RTASE_RX_DV_GATE_EN);
+
+ val = rtase_r8(tp, RTASE_CHIP_CMD);
+ rtase_w8(tp, RTASE_CHIP_CMD, val | RTASE_STOP_REQ);
+ mdelay(2);
+
+ rtase_poll_timeout(tp, RTASE_STOP_REQ_DONE, 100, 150000,
+ RTASE_CHIP_CMD);
+
+ rtase_poll_timeout(tp, RTASE_TX_FIFO_EMPTY, 100, 100000,
+ RTASE_FIFOR);
+
+ rtase_poll_timeout(tp, RTASE_RX_FIFO_EMPTY, 100, 100000,
+ RTASE_FIFOR);
+
+ val = rtase_r8(tp, RTASE_CHIP_CMD);
+ rtase_w8(tp, RTASE_CHIP_CMD, val & ~(RTASE_TE | RTASE_RE));
+ val = rtase_r8(tp, RTASE_CHIP_CMD);
+ rtase_w8(tp, RTASE_CHIP_CMD, val & ~RTASE_STOP_REQ);
+
+ rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config);
+}
+
+static void rtase_hw_reset(const struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+
+ rtase_irq_dis_and_clear(tp);
+
+ rtase_nic_reset(dev);
+}
+
+static void rtase_set_rx_queue(const struct rtase_private *tp)
+{
+ u16 reg_data;
+
+ reg_data = rtase_r16(tp, RTASE_FCR);
+ switch (tp->func_rx_queue_num) {
+ case 1:
+ u16p_replace_bits(&reg_data, 0x1, RTASE_FCR_RXQ_MASK);
+ break;
+ case 2:
+ u16p_replace_bits(&reg_data, 0x2, RTASE_FCR_RXQ_MASK);
+ break;
+ case 4:
+ u16p_replace_bits(&reg_data, 0x3, RTASE_FCR_RXQ_MASK);
+ break;
+ }
+ rtase_w16(tp, RTASE_FCR, reg_data);
+}
+
+static void rtase_set_tx_queue(const struct rtase_private *tp)
+{
+ u16 reg_data;
+
+ reg_data = rtase_r16(tp, RTASE_TX_CONFIG_1);
+ switch (tp->tx_queue_ctrl) {
+ case 1:
+ u16p_replace_bits(&reg_data, 0x0, RTASE_TC_MODE_MASK);
+ break;
+ case 2:
+ u16p_replace_bits(&reg_data, 0x1, RTASE_TC_MODE_MASK);
+ break;
+ case 3:
+ case 4:
+ u16p_replace_bits(&reg_data, 0x2, RTASE_TC_MODE_MASK);
+ break;
+ default:
+ u16p_replace_bits(&reg_data, 0x3, RTASE_TC_MODE_MASK);
+ break;
+ }
+ rtase_w16(tp, RTASE_TX_CONFIG_1, reg_data);
+}
+
+static void rtase_hw_config(struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u32 reg_data32;
+ u16 reg_data16;
+
+ rtase_hw_reset(dev);
+
+ /* set rx dma burst */
+ reg_data16 = rtase_r16(tp, RTASE_RX_CONFIG_0);
+ reg_data16 &= ~(RTASE_RX_SINGLE_TAG | RTASE_RX_SINGLE_FETCH);
+ u16p_replace_bits(&reg_data16, RTASE_RX_DMA_BURST_256,
+ RTASE_RX_MX_DMA_MASK);
+ rtase_w16(tp, RTASE_RX_CONFIG_0, reg_data16);
+
+ /* new rx descriptor format */
+ reg_data16 = rtase_r16(tp, RTASE_RX_CONFIG_1);
+ reg_data16 |= RTASE_RX_NEW_DESC_FORMAT_EN | RTASE_PCIE_NEW_FLOW;
+ u16p_replace_bits(&reg_data16, 0xF, RTASE_RX_MAX_FETCH_DESC_MASK);
+ rtase_w16(tp, RTASE_RX_CONFIG_1, reg_data16);
+
+ rtase_set_rx_queue(tp);
+
+ rtase_interrupt_mitigation(tp);
+
+ /* set tx dma burst size and interframe gap time */
+ reg_data32 = rtase_r32(tp, RTASE_TX_CONFIG_0);
+ u32p_replace_bits(&reg_data32, RTASE_TX_DMA_BURST_UNLIMITED,
+ RTASE_TX_DMA_MASK);
+ u32p_replace_bits(&reg_data32, RTASE_INTERFRAMEGAP,
+ RTASE_TX_INTER_FRAME_GAP_MASK);
+ rtase_w32(tp, RTASE_TX_CONFIG_0, reg_data32);
+
+ /* new tx descriptor */
+ reg_data16 = rtase_r16(tp, RTASE_TFUN_CTRL);
+ rtase_w16(tp, RTASE_TFUN_CTRL, reg_data16 |
+ RTASE_TX_NEW_DESC_FORMAT_EN);
+
+ /* tx fetch desc number */
+ rtase_w8(tp, RTASE_TDFNR, 0x10);
+
+ /* tag num select */
+ reg_data16 = rtase_r16(tp, RTASE_MTPS);
+ u16p_replace_bits(&reg_data16, 0x4, RTASE_TAG_NUM_SEL_MASK);
+ rtase_w16(tp, RTASE_MTPS, reg_data16);
+
+ rtase_set_tx_queue(tp);
+
+ rtase_w16(tp, RTASE_TOKSEL, 0x5555);
+
+ rtase_tally_counter_addr_fill(tp);
+ rtase_desc_addr_fill(tp);
+ rtase_hw_set_features(dev, dev->features);
+
+ /* enable flow control */
+ reg_data16 = rtase_r16(tp, RTASE_CPLUS_CMD);
+ reg_data16 |= (RTASE_FORCE_TXFLOW_EN | RTASE_FORCE_RXFLOW_EN);
+ rtase_w16(tp, RTASE_CPLUS_CMD, reg_data16);
+ /* set rx fifo near-full threshold to mitigate the rx missed issue */
+ rtase_w16(tp, RTASE_RFIFONFULL, 0x190);
+
+ rtase_w16(tp, RTASE_RMS, tp->rx_buf_sz);
+
+ rtase_hw_set_rx_packet_filter(dev);
+}
+
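+/* Illustrative sketch, not part of the driver: u16p_replace_bits() from
+ * <linux/bitfield.h>, used throughout rtase_hw_config(), rewrites only the
+ * register field selected by the mask. With hypothetical values:
+ *
+ *	u16 reg = 0xffff;
+ *
+ *	u16p_replace_bits(&reg, 0x2, GENMASK(5, 4));
+ *	// reg == 0xffef: bits 5:4 are now 0b10, every other bit is kept
+ */
+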
+static void rtase_nic_enable(const struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u16 rcr = rtase_r16(tp, RTASE_RX_CONFIG_1);
+ u8 val;
+
+ rtase_w16(tp, RTASE_RX_CONFIG_1, rcr & ~RTASE_PCIE_RELOAD_EN);
+ rtase_w16(tp, RTASE_RX_CONFIG_1, rcr | RTASE_PCIE_RELOAD_EN);
+
+ val = rtase_r8(tp, RTASE_CHIP_CMD);
+ rtase_w8(tp, RTASE_CHIP_CMD, val | RTASE_TE | RTASE_RE);
+
+ val = rtase_r8(tp, RTASE_MISC);
+ rtase_w8(tp, RTASE_MISC, val & ~RTASE_RX_DV_GATE_EN);
+}
+
+static void rtase_enable_hw_interrupt(const struct rtase_private *tp)
+{
+ const struct rtase_int_vector *ivec = &tp->int_vector[0];
+ u32 i;
+
+ rtase_w32(tp, ivec->imr_addr, ivec->imr);
+
+ for (i = 1; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ rtase_w16(tp, ivec->imr_addr, ivec->imr);
+ }
+}
+
+static void rtase_hw_start(const struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+
+ rtase_nic_enable(dev);
+ rtase_enable_hw_interrupt(tp);
+}
+
+/* the interrupt handler handles the RXQ0, TXQ0 and TXQ4~7 interrupt status
+ */
+static irqreturn_t rtase_interrupt(int irq, void *dev_instance)
+{
+ const struct rtase_private *tp;
+ struct rtase_int_vector *ivec;
+ u32 status;
+
+ ivec = dev_instance;
+ tp = ivec->tp;
+ status = rtase_r32(tp, ivec->isr_addr);
+
+ rtase_w32(tp, ivec->imr_addr, 0x0);
+ rtase_w32(tp, ivec->isr_addr, status & ~RTASE_FOVW);
+
+ if (napi_schedule_prep(&ivec->napi))
+ __napi_schedule(&ivec->napi);
+
+ return IRQ_HANDLED;
+}
+
+/* the interrupt handler does RXQ1&TXQ1 or RXQ2&TXQ2 or RXQ3&TXQ3 interrupt
+ * status according to interrupt vector
+ */
+static irqreturn_t rtase_q_interrupt(int irq, void *dev_instance)
+{
+ const struct rtase_private *tp;
+ struct rtase_int_vector *ivec;
+ u16 status;
+
+ ivec = dev_instance;
+ tp = ivec->tp;
+ status = rtase_r16(tp, ivec->isr_addr);
+
+ rtase_w16(tp, ivec->imr_addr, 0x0);
+ rtase_w16(tp, ivec->isr_addr, status);
+
+ if (napi_schedule_prep(&ivec->napi))
+ __napi_schedule(&ivec->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int rtase_poll(struct napi_struct *napi, int budget)
+{
+ const struct rtase_int_vector *ivec;
+ const struct rtase_private *tp;
+ struct rtase_ring *ring;
+ int total_workdone = 0;
+
+ ivec = container_of(napi, struct rtase_int_vector, napi);
+ tp = ivec->tp;
+
+ list_for_each_entry(ring, &ivec->ring_list, ring_entry)
+ total_workdone += ring->ring_handler(ring, budget);
+
+ if (total_workdone >= budget)
+ return budget;
+
+ if (napi_complete_done(napi, total_workdone)) {
+ if (!ivec->index)
+ rtase_w32(tp, ivec->imr_addr, ivec->imr);
+ else
+ rtase_w16(tp, ivec->imr_addr, ivec->imr);
+ }
+
+ return total_workdone;
+}
+
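+/* Minimal sketch of the NAPI contract rtase_poll() follows (generic kernel
+ * semantics, not driver code):
+ *
+ *	work = handle_rings(budget);
+ *	if (work >= budget)
+ *		return budget;		// stay scheduled, IRQs stay masked
+ *	if (napi_complete_done(napi, work))
+ *		write_imr(ivec);	// re-arm only if NAPI really completed
+ *	return work;
+ */
+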
+static int rtase_open(struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ const struct pci_dev *pdev = tp->pdev;
+ struct rtase_int_vector *ivec;
+ u16 i = 0, j;
+ int ret;
+
+ ivec = &tp->int_vector[0];
+ tp->rx_buf_sz = RTASE_RX_BUF_SIZE;
+
+ ret = rtase_alloc_desc(tp);
+ if (ret)
+ return ret;
+
+ ret = rtase_init_ring(dev);
+ if (ret)
+ goto err_free_all_allocated_mem;
+
+ rtase_hw_config(dev);
+
+ if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) {
+ ret = request_irq(ivec->irq, rtase_interrupt, 0,
+ dev->name, ivec);
+ if (ret)
+ goto err_free_all_allocated_irq;
+
+ /* request other interrupts to handle multiqueue */
+ for (i = 1; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ snprintf(ivec->name, sizeof(ivec->name), "%s_int%i",
+ tp->dev->name, i);
+ ret = request_irq(ivec->irq, rtase_q_interrupt, 0,
+ ivec->name, ivec);
+ if (ret)
+ goto err_free_all_allocated_irq;
+ }
+ } else {
+ ret = request_irq(pdev->irq, rtase_interrupt, 0, dev->name,
+ ivec);
+ if (ret)
+ goto err_free_all_allocated_mem;
+ }
+
+ rtase_hw_start(dev);
+
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ napi_enable(&ivec->napi);
+ }
+
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+
+ return 0;
+
+err_free_all_allocated_irq:
+ for (j = 0; j < i; j++)
+ free_irq(tp->int_vector[j].irq, &tp->int_vector[j]);
+
+err_free_all_allocated_mem:
+ rtase_free_desc(tp);
+
+ return ret;
+}
+
+static void rtase_down(struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ struct rtase_int_vector *ivec;
+ struct rtase_ring *ring, *tmp;
+ u32 i;
+
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ napi_disable(&ivec->napi);
+ list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
+ ring_entry)
+ list_del(&ring->ring_entry);
+ }
+
+ netif_tx_disable(dev);
+
+ netif_carrier_off(dev);
+
+ rtase_hw_reset(dev);
+
+ rtase_tx_clear(tp);
+
+ rtase_rx_clear(tp);
+}
+
+static int rtase_close(struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ const struct pci_dev *pdev = tp->pdev;
+ u32 i;
+
+ rtase_down(dev);
+
+ if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) {
+ for (i = 0; i < tp->int_nums; i++)
+ free_irq(tp->int_vector[i].irq, &tp->int_vector[i]);
+
+ } else {
+ free_irq(pdev->irq, &tp->int_vector[0]);
+ }
+
+ rtase_free_desc(tp);
+
+ return 0;
+}
+
+static u32 rtase_tx_vlan_tag(const struct rtase_private *tp,
+ const struct sk_buff *skb)
+{
+ return (skb_vlan_tag_present(skb)) ?
+ (RTASE_TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb))) : 0x00;
+}
+
+static u32 rtase_tx_csum(struct sk_buff *skb, const struct net_device *dev)
+{
+ u32 csum_cmd = 0;
+ u8 ip_protocol;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ csum_cmd = RTASE_TX_IPCS_C;
+ ip_protocol = ip_hdr(skb)->protocol;
+ break;
+
+ case htons(ETH_P_IPV6):
+ csum_cmd = RTASE_TX_IPV6F_C;
+ ip_protocol = ipv6_hdr(skb)->nexthdr;
+ break;
+
+ default:
+ ip_protocol = IPPROTO_RAW;
+ break;
+ }
+
+ if (ip_protocol == IPPROTO_TCP)
+ csum_cmd |= RTASE_TX_TCPCS_C;
+ else if (ip_protocol == IPPROTO_UDP)
+ csum_cmd |= RTASE_TX_UDPCS_C;
+
+ csum_cmd |= u32_encode_bits(skb_transport_offset(skb),
+ RTASE_TCPHO_MASK);
+
+ return csum_cmd;
+}
+
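+/* Worked example for rtase_tx_csum(), using a hypothetical frame: an IPv4
+ * TCP packet with a 14-byte Ethernet header and a 20-byte IP header has
+ * skb_transport_offset(skb) == 34, so the descriptor command becomes:
+ *
+ *	csum_cmd = RTASE_TX_IPCS_C | RTASE_TX_TCPCS_C |
+ *		   u32_encode_bits(34, RTASE_TCPHO_MASK);
+ */
+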
+static int rtase_xmit_frags(struct rtase_ring *ring, struct sk_buff *skb,
+ u32 opts1, u32 opts2)
+{
+ const struct skb_shared_info *info = skb_shinfo(skb);
+ const struct rtase_private *tp = ring->ivec->tp;
+ const u8 nr_frags = info->nr_frags;
+ struct rtase_tx_desc *txd = NULL;
+ u32 cur_frag, entry;
+
+ entry = ring->cur_idx;
+ for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
+ const skb_frag_t *frag = &info->frags[cur_frag];
+ dma_addr_t mapping;
+ u32 status, len;
+ void *addr;
+
+ entry = (entry + 1) % RTASE_NUM_DESC;
+
+ txd = ring->desc + sizeof(struct rtase_tx_desc) * entry;
+ len = skb_frag_size(frag);
+ addr = skb_frag_address(frag);
+ mapping = dma_map_single(&tp->pdev->dev, addr, len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
+ if (unlikely(net_ratelimit()))
+ netdev_err(tp->dev,
+ "Failed to map TX fragments DMA!\n");
+
+ goto err_out;
+ }
+
+ if (((entry + 1) % RTASE_NUM_DESC) == 0)
+ status = (opts1 | len | RTASE_RING_END);
+ else
+ status = opts1 | len;
+
+ if (cur_frag == (nr_frags - 1)) {
+ ring->skbuff[entry] = skb;
+ status |= RTASE_TX_LAST_FRAG;
+ }
+
+ ring->mis.len[entry] = len;
+ txd->addr = cpu_to_le64(mapping);
+ txd->opts2 = cpu_to_le32(opts2);
+
+ /* make sure the operating fields have been updated */
+ dma_wmb();
+ txd->opts1 = cpu_to_le32(status);
+ }
+
+ return cur_frag;
+
+err_out:
+ rtase_tx_clear_range(ring, ring->cur_idx + 1, cur_frag);
+ return -EIO;
+}
+
+static netdev_tx_t rtase_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ struct rtase_private *tp = netdev_priv(dev);
+ u32 q_idx, entry, len, opts1, opts2;
+ struct netdev_queue *tx_queue;
+ bool stop_queue, door_bell;
+ u32 mss = shinfo->gso_size;
+ struct rtase_tx_desc *txd;
+ struct rtase_ring *ring;
+ dma_addr_t mapping;
+ int frags;
+
+ /* multiqueues */
+ q_idx = skb_get_queue_mapping(skb);
+ ring = &tp->tx_ring[q_idx];
+ tx_queue = netdev_get_tx_queue(dev, q_idx);
+
+ if (unlikely(!rtase_tx_avail(ring))) {
+ if (net_ratelimit())
+ netdev_err(dev,
+ "BUG! Tx Ring full when queue awake!\n");
+
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = ring->cur_idx % RTASE_NUM_DESC;
+ txd = ring->desc + sizeof(struct rtase_tx_desc) * entry;
+
+ opts1 = RTASE_DESC_OWN;
+ opts2 = rtase_tx_vlan_tag(tp, skb);
+
+ /* tcp segmentation offload (or tcp large send) */
+ if (mss) {
+ if (shinfo->gso_type & SKB_GSO_TCPV4) {
+ opts1 |= RTASE_GIANT_SEND_V4;
+ } else if (shinfo->gso_type & SKB_GSO_TCPV6) {
+ if (skb_cow_head(skb, 0))
+ goto err_dma_0;
+
+ tcp_v6_gso_csum_prep(skb);
+ opts1 |= RTASE_GIANT_SEND_V6;
+ } else {
+ WARN_ON_ONCE(1);
+ }
+
+ opts1 |= u32_encode_bits(skb_transport_offset(skb),
+ RTASE_TCPHO_MASK);
+ opts2 |= u32_encode_bits(mss, RTASE_MSS_MASK);
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ opts2 |= rtase_tx_csum(skb, dev);
+ }
+
+ frags = rtase_xmit_frags(ring, skb, opts1, opts2);
+ if (unlikely(frags < 0))
+ goto err_dma_0;
+
+ if (frags) {
+ len = skb_headlen(skb);
+ opts1 |= RTASE_TX_FIRST_FRAG;
+ } else {
+ len = skb->len;
+ ring->skbuff[entry] = skb;
+ opts1 |= RTASE_TX_FIRST_FRAG | RTASE_TX_LAST_FRAG;
+ }
+
+ if (((entry + 1) % RTASE_NUM_DESC) == 0)
+ opts1 |= (len | RTASE_RING_END);
+ else
+ opts1 |= len;
+
+ mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
+ if (unlikely(net_ratelimit()))
+ netdev_err(dev, "Failed to map TX DMA!\n");
+
+ goto err_dma_1;
+ }
+
+ ring->mis.len[entry] = len;
+ txd->addr = cpu_to_le64(mapping);
+ txd->opts2 = cpu_to_le32(opts2);
+ txd->opts1 = cpu_to_le32(opts1 & ~RTASE_DESC_OWN);
+
+ /* make sure the operating fields have been updated */
+ dma_wmb();
+
+ door_bell = __netdev_tx_sent_queue(tx_queue, skb->len,
+ netdev_xmit_more());
+
+ txd->opts1 = cpu_to_le32(opts1);
+
+ skb_tx_timestamp(skb);
+
+ /* tx needs to see the descriptor changes before the updated cur_idx */
+ smp_wmb();
+
+ WRITE_ONCE(ring->cur_idx, ring->cur_idx + frags + 1);
+
+ stop_queue = !netif_subqueue_maybe_stop(dev, ring->index,
+ rtase_tx_avail(ring),
+ RTASE_TX_STOP_THRS,
+ RTASE_TX_START_THRS);
+
+ if (door_bell || stop_queue)
+ rtase_w8(tp, RTASE_TPPOLL, BIT(ring->index));
+
+ return NETDEV_TX_OK;
+
+err_dma_1:
+ ring->skbuff[entry] = NULL;
+ rtase_tx_clear_range(ring, ring->cur_idx + 1, frags);
+
+err_dma_0:
+ tp->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
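+/* Sketch of the doorbell batching used at the end of rtase_start_xmit()
+ * (generic semantics of the helpers, not driver code):
+ *
+ *	more = netdev_xmit_more();		// stack has more skbs queued
+ *	door_bell = __netdev_tx_sent_queue(tx_queue, skb->len, more);
+ *	if (door_bell || stop_queue)
+ *		kick_hw_doorbell();		// one MMIO write per tx burst
+ */
+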
+static void rtase_set_rx_mode(struct net_device *dev)
+{
+ rtase_hw_set_rx_packet_filter(dev);
+}
+
+static void rtase_enable_eem_write(const struct rtase_private *tp)
+{
+ u8 val;
+
+ val = rtase_r8(tp, RTASE_EEM);
+ rtase_w8(tp, RTASE_EEM, val | RTASE_EEM_UNLOCK);
+}
+
+static void rtase_disable_eem_write(const struct rtase_private *tp)
+{
+ u8 val;
+
+ val = rtase_r8(tp, RTASE_EEM);
+ rtase_w8(tp, RTASE_EEM, val & ~RTASE_EEM_UNLOCK);
+}
+
+static void rtase_rar_set(const struct rtase_private *tp, const u8 *addr)
+{
+ u32 rar_low, rar_high;
+
+ rar_low = (u32)addr[0] | ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
+
+ rar_high = (u32)addr[4] | ((u32)addr[5] << 8);
+
+ rtase_enable_eem_write(tp);
+ rtase_w32(tp, RTASE_MAC0, rar_low);
+ rtase_w32(tp, RTASE_MAC4, rar_high);
+ rtase_disable_eem_write(tp);
+ rtase_w16(tp, RTASE_LBK_CTRL, RTASE_LBK_ATLD | RTASE_LBK_CLR);
+}
+
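+/* Worked example of the packing in rtase_rar_set(), for the made-up MAC
+ * address 00:11:22:33:44:55:
+ *
+ *	rar_low  == 0x33221100;	// addr[3..0], little-endian, into MAC0
+ *	rar_high == 0x00005544;	// addr[5..4] into MAC4
+ */
+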
+static int rtase_set_mac_address(struct net_device *dev, void *p)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ int ret;
+
+ ret = eth_mac_addr(dev, p);
+ if (ret)
+ return ret;
+
+ rtase_rar_set(tp, dev->dev_addr);
+
+ return 0;
+}
+
+static int rtase_change_mtu(struct net_device *dev, int new_mtu)
+{
+ dev->mtu = new_mtu;
+
+ netdev_update_features(dev);
+
+ return 0;
+}
+
+static void rtase_wait_for_quiescence(const struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ struct rtase_int_vector *ivec;
+ u32 i;
+
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ synchronize_irq(ivec->irq);
+ /* wait for any pending NAPI task to complete */
+ napi_disable(&ivec->napi);
+ }
+
+ rtase_irq_dis_and_clear(tp);
+
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ napi_enable(&ivec->napi);
+ }
+}
+
+static void rtase_sw_reset(struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ int ret;
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ rtase_hw_reset(dev);
+
+ /* wait until any in-flight (async) irq has landed */
+ rtase_wait_for_quiescence(dev);
+ rtase_tx_clear(tp);
+ rtase_rx_clear(tp);
+
+ ret = rtase_init_ring(dev);
+ if (ret) {
+ netdev_err(dev, "unable to init ring\n");
+ rtase_free_desc(tp);
+ return;
+ }
+
+ rtase_hw_config(dev);
+ /* the link is always up, so start to transmit & receive */
+ rtase_hw_start(dev);
+
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+}
+
+static void rtase_dump_tally_counter(const struct rtase_private *tp)
+{
+ dma_addr_t paddr = tp->tally_paddr;
+ u32 cmd = lower_32_bits(paddr);
+ u32 val;
+ int err;
+
+ rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(paddr));
+ rtase_w32(tp, RTASE_DTCCR0, cmd);
+ rtase_w32(tp, RTASE_DTCCR0, cmd | RTASE_COUNTER_DUMP);
+
+ err = read_poll_timeout(rtase_r32, val, !(val & RTASE_COUNTER_DUMP),
+ 10, 250, false, tp, RTASE_DTCCR0);
+
+ if (err == -ETIMEDOUT)
+ netdev_err(tp->dev, "error occurred in dump tally counter\n");
+}
+
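+/* read_poll_timeout() (<linux/iopoll.h>) with the arguments used above
+ * expands roughly to the loop below (simplified sketch):
+ *
+ *	for (;;) {
+ *		val = rtase_r32(tp, RTASE_DTCCR0);
+ *		if (!(val & RTASE_COUNTER_DUMP))
+ *			break;			// hw finished the dump
+ *		if (more_than_250us_elapsed())
+ *			return -ETIMEDOUT;
+ *		usleep_range(3, 10);		// derived from sleep_us == 10
+ *	}
+ */
+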
+static void rtase_dump_state(const struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ int max_reg_size = RTASE_PCI_REGS_SIZE;
+ const struct rtase_counters *counters;
+ const struct rtase_ring *ring;
+ u32 dword_rd;
+ int n = 0;
+
+ ring = &tp->tx_ring[0];
+ netdev_err(dev, "Tx descriptor info:\n");
+ netdev_err(dev, "Tx curIdx = 0x%x\n", ring->cur_idx);
+ netdev_err(dev, "Tx dirtyIdx = 0x%x\n", ring->dirty_idx);
+ netdev_err(dev, "Tx phyAddr = %pad\n", &ring->phy_addr);
+
+ ring = &tp->rx_ring[0];
+ netdev_err(dev, "Rx descriptor info:\n");
+ netdev_err(dev, "Rx curIdx = 0x%x\n", ring->cur_idx);
+ netdev_err(dev, "Rx dirtyIdx = 0x%x\n", ring->dirty_idx);
+ netdev_err(dev, "Rx phyAddr = %pad\n", &ring->phy_addr);
+
+ netdev_err(dev, "Device Registers:\n");
+ netdev_err(dev, "Chip Command = 0x%02x\n",
+ rtase_r8(tp, RTASE_CHIP_CMD));
+ netdev_err(dev, "IMR = %08x\n", rtase_r32(tp, RTASE_IMR0));
+ netdev_err(dev, "ISR = %08x\n", rtase_r32(tp, RTASE_ISR0));
+ netdev_err(dev, "Boot Ctrl Reg(0xE004) = %04x\n",
+ rtase_r16(tp, RTASE_BOOT_CTL));
+ netdev_err(dev, "EPHY ISR(0xE014) = %04x\n",
+ rtase_r16(tp, RTASE_EPHY_ISR));
+ netdev_err(dev, "EPHY IMR(0xE016) = %04x\n",
+ rtase_r16(tp, RTASE_EPHY_IMR));
+ netdev_err(dev, "CLKSW SET REG(0xE018) = %04x\n",
+ rtase_r16(tp, RTASE_CLKSW_SET));
+
+ netdev_err(dev, "Dump PCI Registers:\n");
+
+ while (n < max_reg_size) {
+ if ((n % RTASE_DWORD_MOD) == 0)
+ netdev_err(tp->dev, "0x%03x:\n", n);
+
+ pci_read_config_dword(tp->pdev, n, &dword_rd);
+ netdev_err(tp->dev, "%08x\n", dword_rd);
+ n += 4;
+ }
+
+ netdev_err(dev, "Dump tally counter:\n");
+ counters = tp->tally_vaddr;
+ rtase_dump_tally_counter(tp);
+
+ netdev_err(dev, "tx_packets %lld\n",
+ le64_to_cpu(counters->tx_packets));
+ netdev_err(dev, "rx_packets %lld\n",
+ le64_to_cpu(counters->rx_packets));
+ netdev_err(dev, "tx_errors %lld\n",
+ le64_to_cpu(counters->tx_errors));
+ netdev_err(dev, "rx_errors %d\n",
+ le32_to_cpu(counters->rx_errors));
+ netdev_err(dev, "rx_missed %d\n",
+ le16_to_cpu(counters->rx_missed));
+ netdev_err(dev, "align_errors %d\n",
+ le16_to_cpu(counters->align_errors));
+ netdev_err(dev, "tx_one_collision %d\n",
+ le32_to_cpu(counters->tx_one_collision));
+ netdev_err(dev, "tx_multi_collision %d\n",
+ le32_to_cpu(counters->tx_multi_collision));
+ netdev_err(dev, "rx_unicast %lld\n",
+ le64_to_cpu(counters->rx_unicast));
+ netdev_err(dev, "rx_broadcast %lld\n",
+ le64_to_cpu(counters->rx_broadcast));
+ netdev_err(dev, "rx_multicast %d\n",
+ le32_to_cpu(counters->rx_multicast));
+ netdev_err(dev, "tx_aborted %d\n",
+ le16_to_cpu(counters->tx_aborted));
+ netdev_err(dev, "tx_underrun %d\n",
+ le16_to_cpu(counters->tx_underrun));
+}
+
+static void rtase_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ rtase_dump_state(dev);
+ rtase_sw_reset(dev);
+}
+
+static void rtase_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ const struct rtase_counters *counters;
+
+ counters = tp->tally_vaddr;
+
+ dev_fetch_sw_netstats(stats, dev->tstats);
+
+ /* fetch, from the hw tally counters, the counter values that are
+ * missing from the stats the driver collects itself
+ */
+ rtase_dump_tally_counter(tp);
+ stats->rx_errors = tp->stats.rx_errors;
+ stats->tx_errors = le64_to_cpu(counters->tx_errors);
+ stats->rx_dropped = tp->stats.rx_dropped;
+ stats->tx_dropped = tp->stats.tx_dropped;
+ stats->multicast = tp->stats.multicast;
+ stats->rx_length_errors = tp->stats.rx_length_errors;
+}
+
+static netdev_features_t rtase_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t features_fix = features;
+
+ /* TSO is not supported for jumbo frames */
+ if (dev->mtu > ETH_DATA_LEN)
+ features_fix &= ~NETIF_F_ALL_TSO;
+
+ return features_fix;
+}
+
+static int rtase_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t features_set = features;
+
+ features_set &= NETIF_F_RXALL | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_RX;
+
+ if (features_set ^ dev->features)
+ rtase_hw_set_features(dev, features_set);
+
+ return 0;
+}
+
+static const struct net_device_ops rtase_netdev_ops = {
+ .ndo_open = rtase_open,
+ .ndo_stop = rtase_close,
+ .ndo_start_xmit = rtase_start_xmit,
+ .ndo_set_rx_mode = rtase_set_rx_mode,
+ .ndo_set_mac_address = rtase_set_mac_address,
+ .ndo_change_mtu = rtase_change_mtu,
+ .ndo_tx_timeout = rtase_tx_timeout,
+ .ndo_get_stats64 = rtase_get_stats64,
+ .ndo_fix_features = rtase_fix_features,
+ .ndo_set_features = rtase_set_features,
+};
+
+static void rtase_get_mac_address(struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ u8 mac_addr[ETH_ALEN] __aligned(2) = {};
+ u32 i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = rtase_r8(tp, RTASE_MAC0 + i);
+
+ if (!is_valid_ether_addr(mac_addr)) {
+ eth_hw_addr_random(dev);
+ netdev_warn(dev, "Random ether addr %pM\n", dev->dev_addr);
+ } else {
+ eth_hw_addr_set(dev, mac_addr);
+ ether_addr_copy(dev->perm_addr, dev->dev_addr);
+ }
+
+ rtase_rar_set(tp, dev->dev_addr);
+}
+
+static int rtase_get_settings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ u32 supported = SUPPORTED_MII | SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ cmd->base.speed = SPEED_5000;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_MII;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static void rtase_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u16 value = rtase_r16(tp, RTASE_CPLUS_CMD);
+
+ pause->autoneg = AUTONEG_DISABLE;
+ pause->tx_pause = !!(value & RTASE_FORCE_TXFLOW_EN);
+ pause->rx_pause = !!(value & RTASE_FORCE_RXFLOW_EN);
+}
+
+static int rtase_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u16 value = rtase_r16(tp, RTASE_CPLUS_CMD);
+
+ if (pause->autoneg)
+ return -EOPNOTSUPP;
+
+ value &= ~(RTASE_FORCE_TXFLOW_EN | RTASE_FORCE_RXFLOW_EN);
+
+ if (pause->tx_pause)
+ value |= RTASE_FORCE_TXFLOW_EN;
+
+ if (pause->rx_pause)
+ value |= RTASE_FORCE_RXFLOW_EN;
+
+ rtase_w16(tp, RTASE_CPLUS_CMD, value);
+ return 0;
+}
+
+static void rtase_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *stats)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ const struct rtase_counters *counters;
+
+ counters = tp->tally_vaddr;
+
+ rtase_dump_tally_counter(tp);
+
+ stats->FramesTransmittedOK = le64_to_cpu(counters->tx_packets);
+ stats->FramesReceivedOK = le64_to_cpu(counters->rx_packets);
+ stats->FramesLostDueToIntMACXmitError =
+ le64_to_cpu(counters->tx_errors);
+ stats->BroadcastFramesReceivedOK = le64_to_cpu(counters->rx_broadcast);
+}
+
+static const struct ethtool_ops rtase_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = rtase_get_settings,
+ .get_pauseparam = rtase_get_pauseparam,
+ .set_pauseparam = rtase_set_pauseparam,
+ .get_eth_mac_stats = rtase_get_eth_mac_stats,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static void rtase_init_netdev_ops(struct net_device *dev)
+{
+ dev->netdev_ops = &rtase_netdev_ops;
+ dev->ethtool_ops = &rtase_ethtool_ops;
+}
+
+static void rtase_reset_interrupt(struct pci_dev *pdev,
+ const struct rtase_private *tp)
+{
+ if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED)
+ pci_disable_msix(pdev);
+ else
+ pci_disable_msi(pdev);
+}
+
+static int rtase_alloc_msix(struct pci_dev *pdev, struct rtase_private *tp)
+{
+ int ret, irq;
+ u16 i;
+
+ memset(tp->msix_entry, 0x0, RTASE_NUM_MSIX *
+ sizeof(struct msix_entry));
+
+ for (i = 0; i < RTASE_NUM_MSIX; i++)
+ tp->msix_entry[i].entry = i;
+
+ ret = pci_enable_msix_exact(pdev, tp->msix_entry, tp->int_nums);
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < tp->int_nums; i++) {
+ irq = pci_irq_vector(pdev, i);
+ if (irq < 0) {
+ pci_disable_msix(pdev);
+ return irq;
+ }
+
+ tp->int_vector[i].irq = irq;
+ }
+
+ return 0;
+}
+
+static int rtase_alloc_interrupt(struct pci_dev *pdev,
+ struct rtase_private *tp)
+{
+ int ret;
+
+ ret = rtase_alloc_msix(pdev, tp);
+ if (ret) {
+ ret = pci_enable_msi(pdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to alloc interrupt.(MSI)\n");
+ return ret;
+ }
+
+ tp->sw_flag |= RTASE_SWF_MSI_ENABLED;
+ } else {
+ tp->sw_flag |= RTASE_SWF_MSIX_ENABLED;
+ }
+
+ return 0;
+}
+
+static void rtase_init_hardware(const struct rtase_private *tp)
+{
+ u16 i;
+
+ for (i = 0; i < RTASE_VLAN_FILTER_ENTRY_NUM; i++)
+ rtase_w32(tp, RTASE_VLAN_ENTRY_0 + i * 4, 0);
+}
+
+static void rtase_init_int_vector(struct rtase_private *tp)
+{
+ u16 i;
+
+ /* interrupt vector 0 */
+ tp->int_vector[0].tp = tp;
+ tp->int_vector[0].index = 0;
+ tp->int_vector[0].imr_addr = RTASE_IMR0;
+ tp->int_vector[0].isr_addr = RTASE_ISR0;
+ tp->int_vector[0].imr = RTASE_ROK | RTASE_RDU | RTASE_TOK |
+ RTASE_TOK4 | RTASE_TOK5 | RTASE_TOK6 |
+ RTASE_TOK7;
+ tp->int_vector[0].poll = rtase_poll;
+
+ memset(tp->int_vector[0].name, 0x0, sizeof(tp->int_vector[0].name));
+ INIT_LIST_HEAD(&tp->int_vector[0].ring_list);
+
+ netif_napi_add(tp->dev, &tp->int_vector[0].napi,
+ tp->int_vector[0].poll);
+
+ /* interrupt vector 1 ~ 3 */
+ for (i = 1; i < tp->int_nums; i++) {
+ tp->int_vector[i].tp = tp;
+ tp->int_vector[i].index = i;
+ tp->int_vector[i].imr_addr = RTASE_IMR1 + (i - 1) * 4;
+ tp->int_vector[i].isr_addr = RTASE_ISR1 + (i - 1) * 4;
+ tp->int_vector[i].imr = RTASE_Q_ROK | RTASE_Q_RDU |
+ RTASE_Q_TOK;
+ tp->int_vector[i].poll = rtase_poll;
+
+ memset(tp->int_vector[i].name, 0x0,
+ sizeof(tp->int_vector[0].name));
+ INIT_LIST_HEAD(&tp->int_vector[i].ring_list);
+
+ netif_napi_add(tp->dev, &tp->int_vector[i].napi,
+ tp->int_vector[i].poll);
+ }
+}
+
+static u16 rtase_calc_time_mitigation(u32 time_us)
+{
+ u8 msb, time_count, time_unit;
+ u16 int_miti;
+
+ time_us = min_t(int, time_us, RTASE_MITI_MAX_TIME);
+
+ msb = fls(time_us);
+ if (msb >= RTASE_MITI_COUNT_BIT_NUM) {
+ time_unit = msb - RTASE_MITI_COUNT_BIT_NUM;
+ time_count = time_us >> (msb - RTASE_MITI_COUNT_BIT_NUM);
+ } else {
+ time_unit = 0;
+ time_count = time_us;
+ }
+
+ int_miti = u16_encode_bits(time_count, RTASE_MITI_TIME_COUNT_MASK) |
+ u16_encode_bits(time_unit, RTASE_MITI_TIME_UNIT_MASK);
+
+ return int_miti;
+}
+
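+/* The mitigation encoding is a tiny floating-point format: a count
+ * (mantissa) of RTASE_MITI_COUNT_BIT_NUM significant bits plus a unit
+ * (shift). Worked example, assuming RTASE_MITI_COUNT_BIT_NUM == 4:
+ *
+ *	time_us = 100;			// 0b1100100, so fls() == 7
+ *	time_unit = 7 - 4;		// == 3
+ *	time_count = 100 >> 3;		// == 12, hw timer ~ 12 << 3 = 96 us
+ */
+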
+static u16 rtase_calc_packet_num_mitigation(u16 pkt_num)
+{
+ u8 msb, pkt_num_count, pkt_num_unit;
+ u16 int_miti;
+
+ pkt_num = min_t(int, pkt_num, RTASE_MITI_MAX_PKT_NUM);
+
+ if (pkt_num > 60) {
+ pkt_num_unit = RTASE_MITI_MAX_PKT_NUM_IDX;
+ pkt_num_count = pkt_num / RTASE_MITI_MAX_PKT_NUM_UNIT;
+ } else {
+ msb = fls(pkt_num);
+ if (msb >= RTASE_MITI_COUNT_BIT_NUM) {
+ pkt_num_unit = msb - RTASE_MITI_COUNT_BIT_NUM;
+ pkt_num_count = pkt_num >> (msb -
+ RTASE_MITI_COUNT_BIT_NUM);
+ } else {
+ pkt_num_unit = 0;
+ pkt_num_count = pkt_num;
+ }
+ }
+
+ int_miti = u16_encode_bits(pkt_num_count,
+ RTASE_MITI_PKT_NUM_COUNT_MASK) |
+ u16_encode_bits(pkt_num_unit,
+ RTASE_MITI_PKT_NUM_UNIT_MASK);
+
+ return int_miti;
+}
+
+static void rtase_init_software_variable(struct pci_dev *pdev,
+ struct rtase_private *tp)
+{
+ u16 int_miti;
+
+ tp->tx_queue_ctrl = RTASE_TXQ_CTRL;
+ tp->func_tx_queue_num = RTASE_FUNC_TXQ_NUM;
+ tp->func_rx_queue_num = RTASE_FUNC_RXQ_NUM;
+ tp->int_nums = RTASE_INTERRUPT_NUM;
+
+ int_miti = rtase_calc_time_mitigation(RTASE_MITI_DEFAULT_TIME) |
+ rtase_calc_packet_num_mitigation(RTASE_MITI_DEFAULT_PKT_NUM);
+ tp->tx_int_mit = int_miti;
+ tp->rx_int_mit = int_miti;
+
+ tp->sw_flag = 0;
+
+ rtase_init_int_vector(tp);
+
+ /* MTU range: 60 - hw-specific max */
+ tp->dev->min_mtu = ETH_ZLEN;
+ tp->dev->max_mtu = RTASE_MAX_JUMBO_SIZE;
+}
+
+static bool rtase_check_mac_version_valid(struct rtase_private *tp)
+{
+ u32 hw_ver = rtase_r32(tp, RTASE_TX_CONFIG_0) & RTASE_HW_VER_MASK;
+ bool known_ver = false;
+
+ switch (hw_ver) {
+ case 0x00800000:
+ case 0x04000000:
+ case 0x04800000:
+ known_ver = true;
+ break;
+ }
+
+ return known_ver;
+}
+
+static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out,
+ void __iomem **ioaddr_out)
+{
+ struct net_device *dev;
+ void __iomem *ioaddr;
+ int ret = -ENOMEM;
+
+ /* dev zeroed in alloc_etherdev */
+ dev = alloc_etherdev_mq(sizeof(struct rtase_private),
+ RTASE_FUNC_TXQ_NUM);
+ if (!dev)
+ goto err_out;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0)
+ goto err_out_free_dev;
+
+ /* make sure PCI base addr 2 is MMIO */
+ if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ ret = -ENODEV;
+ goto err_out_disable;
+ }
+
+ /* check for weird/broken PCI region reporting */
+ if (pci_resource_len(pdev, 2) < RTASE_REGS_SIZE) {
+ ret = -ENODEV;
+ goto err_out_disable;
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret < 0)
+ goto err_out_disable;
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "no usable dma addressing method\n");
+ goto err_out_free_res;
+ }
+
+ pci_set_master(pdev);
+
+ /* ioremap MMIO region */
+ ioaddr = ioremap(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ if (!ioaddr) {
+ ret = -EIO;
+ goto err_out_free_res;
+ }
+
+ *ioaddr_out = ioaddr;
+ *dev_out = dev;
+
+ return ret;
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_disable:
+ pci_disable_device(pdev);
+
+err_out_free_dev:
+ free_netdev(dev);
+
+err_out:
+ *ioaddr_out = NULL;
+ *dev_out = NULL;
+
+ return ret;
+}
+
+static void rtase_release_board(struct pci_dev *pdev, struct net_device *dev,
+ void __iomem *ioaddr)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+
+ rtase_rar_set(tp, tp->dev->perm_addr);
+ iounmap(ioaddr);
+
+ if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED)
+ pci_disable_msix(pdev);
+ else
+ pci_disable_msi(pdev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ free_netdev(dev);
+}
+
+static int rtase_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev = NULL;
+ struct rtase_int_vector *ivec;
+ void __iomem *ioaddr = NULL;
+ struct rtase_private *tp;
+ int ret, i;
+
+ if (!pdev->is_physfn && pdev->is_virtfn) {
+ dev_err(&pdev->dev,
+ "This module does not support a virtual function.");
+ return -EINVAL;
+ }
+
+ dev_dbg(&pdev->dev, "Automotive Switch Ethernet driver loaded\n");
+
+ ret = rtase_init_board(pdev, &dev, &ioaddr);
+ if (ret != 0)
+ return ret;
+
+ tp = netdev_priv(dev);
+ tp->mmio_addr = ioaddr;
+ tp->dev = dev;
+ tp->pdev = pdev;
+
+ /* identify chip attached to board */
+ if (!rtase_check_mac_version_valid(tp))
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "unknown chip version, contact rtase maintainers (see MAINTAINERS file)\n");
+
+ rtase_init_software_variable(pdev, tp);
+ rtase_init_hardware(tp);
+
+ ret = rtase_alloc_interrupt(pdev, tp);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to alloc MSIX/MSI\n");
+ goto err_out_1;
+ }
+
+ rtase_init_netdev_ops(dev);
+
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_IP_CSUM | NETIF_F_HIGHDMA |
+ NETIF_F_RXCSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO6;
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_TSO | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_RXALL | NETIF_F_RXFCS |
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HIGHDMA;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ netif_set_tso_max_size(dev, RTASE_LSO_64K);
+ netif_set_tso_max_segs(dev, RTASE_NIC_MAX_PHYS_BUF_COUNT_LSO2);
+
+ rtase_get_mac_address(dev);
+
+ tp->tally_vaddr = dma_alloc_coherent(&pdev->dev,
+ sizeof(*tp->tally_vaddr),
+ &tp->tally_paddr,
+ GFP_KERNEL);
+ if (!tp->tally_vaddr) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ rtase_tally_counter_clear(tp);
+
+ pci_set_drvdata(pdev, dev);
+
+ netif_carrier_off(dev);
+
+ ret = register_netdev(dev);
+ if (ret != 0)
+ goto err_out;
+
+ netdev_dbg(dev, "%pM, IRQ %d\n", dev->dev_addr, dev->irq);
+
+ return 0;
+
+err_out:
+ if (tp->tally_vaddr) {
+ dma_free_coherent(&pdev->dev,
+ sizeof(*tp->tally_vaddr),
+ tp->tally_vaddr,
+ tp->tally_paddr);
+
+ tp->tally_vaddr = NULL;
+ }
+
+err_out_1:
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ netif_napi_del(&ivec->napi);
+ }
+
+ rtase_release_board(pdev, dev, ioaddr);
+
+ return ret;
+}
+
+static void rtase_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtase_private *tp = netdev_priv(dev);
+ struct rtase_int_vector *ivec;
+ u32 i;
+
+ unregister_netdev(dev);
+
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ netif_napi_del(&ivec->napi);
+ }
+
+ rtase_reset_interrupt(pdev, tp);
+ if (tp->tally_vaddr) {
+ dma_free_coherent(&pdev->dev,
+ sizeof(*tp->tally_vaddr),
+ tp->tally_vaddr,
+ tp->tally_paddr);
+ tp->tally_vaddr = NULL;
+ }
+
+ rtase_release_board(pdev, dev, tp->mmio_addr);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static void rtase_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ const struct rtase_private *tp;
+
+ tp = netdev_priv(dev);
+
+ if (netif_running(dev))
+ rtase_close(dev);
+
+ rtase_reset_interrupt(pdev, tp);
+}
+
+static int rtase_suspend(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ rtase_hw_reset(dev);
+ }
+
+ return 0;
+}
+
+static int rtase_resume(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+ struct rtase_private *tp = netdev_priv(dev);
+ int ret;
+
+ /* restore last modified mac address */
+ rtase_rar_set(tp, dev->dev_addr);
+
+ if (!netif_running(dev))
+ goto out;
+
+ rtase_wait_for_quiescence(dev);
+
+ rtase_tx_clear(tp);
+ rtase_rx_clear(tp);
+
+ ret = rtase_init_ring(dev);
+ if (ret) {
+ netdev_err(dev, "unable to init ring\n");
+ rtase_free_desc(tp);
+ return -ENOMEM;
+ }
+
+ rtase_hw_config(dev);
+ /* the link is always up, so start to transmit & receive */
+ rtase_hw_start(dev);
+
+ netif_device_attach(dev);
+out:
+
+ return 0;
+}
+
+static const struct dev_pm_ops rtase_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(rtase_suspend, rtase_resume)
+};
+
+static struct pci_driver rtase_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = rtase_pci_tbl,
+ .probe = rtase_init_one,
+ .remove = rtase_remove_one,
+ .shutdown = rtase_shutdown,
+ .driver.pm = pm_ptr(&rtase_pm_ops),
+};
+
+module_pci_driver(rtase_pci_driver);
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 9893c91af105..a7de5cf6b317 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1052,6 +1052,7 @@ struct ravb_hw_info {
netdev_features_t net_features;
int stats_len;
u32 tccr_mask;
+ u32 tx_max_frame_size;
u32 rx_max_frame_size;
u32 rx_buffer_size;
u32 rx_desc_size;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c02fb296bf7d..907af4651c55 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -555,8 +555,16 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
static void ravb_emac_init_rcar(struct net_device *ndev)
{
- /* Receive frame limit set register */
- ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ /* Set receive frame length
+ *
+ * The length set here describes the frame from the destination address
+ * up to and including the CRC data. However only the frame data,
+ * excluding the CRC, are transferred to memory. To allow for the
+ * largest frames add the CRC length to the maximum Rx descriptor size.
+ */
+ ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);
/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
ravb_write(ndev, ECMR_ZPF | ECMR_DM |
@@ -1742,20 +1750,19 @@ static int ravb_get_ts_info(struct net_device *ndev,
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *hw_info = priv->info;
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
- info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_ALL);
- if (hw_info->gptp || hw_info->ccc_gac)
+ if (hw_info->gptp || hw_info->ccc_gac) {
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_ALL);
info->phc_index = ptp_clock_index(priv->ptp.clock);
+ }
return 0;
}
@@ -2674,6 +2681,7 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
@@ -2696,6 +2704,7 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
@@ -2721,6 +2730,7 @@ static const struct ravb_hw_info ravb_gen4_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
@@ -2770,6 +2780,7 @@ static const struct ravb_hw_info gbeth_hw_info = {
.net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
.tccr_mask = TCCR_TSRQ0,
+ .tx_max_frame_size = 1522,
.rx_max_frame_size = SZ_8K,
.rx_buffer_size = SZ_2K,
.rx_desc_size = sizeof(struct ravb_rx_desc),
@@ -2981,7 +2992,7 @@ static int ravb_probe(struct platform_device *pdev)
priv->avb_link_active_low =
of_property_read_bool(np, "renesas,ether-link-active-low");
- ndev->max_mtu = info->rx_max_frame_size -
+ ndev->max_mtu = info->tx_max_frame_size -
(ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index ff50e20856ec..b80aa27a7214 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -1815,8 +1815,6 @@ static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts
info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
index 0e6cea42f007..6b3f7fca8d15 100644
--- a/drivers/net/ethernet/renesas/rtsn.c
+++ b/drivers/net/ethernet/renesas/rtsn.c
@@ -1057,6 +1057,7 @@ static netdev_tx_t rtsn_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb->len >= TX_DS) {
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
+ dev_kfree_skb_any(skb);
goto out;
}
@@ -1219,8 +1220,6 @@ static int rtsn_get_ts_info(struct net_device *ndev,
info->phc_index = ptp_clock_index(priv->ptp_priv->clock);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index e097ce3e69ea..84fa911c78db 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2575,7 +2575,8 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx);
rocker_carrier_init(rocker_port);
- dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
+ dev->features |= NETIF_F_SG;
+ dev->netns_local = true;
/* MTU range: 68 - 9000 */
dev->min_mtu = ROCKER_PORT_MIN_MTU;
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index c672f92d65e9..9319a2675e7b 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -847,9 +847,11 @@ static void ether3_remove(struct expansion_card *ec)
{
struct net_device *dev = ecard_get_drvdata(ec);
+ ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2);
ecard_set_drvdata(ec, NULL);
unregister_netdev(dev);
+ del_timer_sync(&priv(dev)->timer);
free_netdev(dev);
ecard_release_resources(ec);
}
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 7d69302ffa0a..de131fc5fa0b 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4302,3 +4302,130 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.sensor_event = efx_mcdi_sensor_event,
.rx_recycle_ring_size = efx_ef10_recycle_ring_size,
};
+
+const struct efx_nic_type efx_x4_nic_type = {
+ .is_vf = false,
+ .mem_bar = efx_ef10_pf_mem_bar,
+ .mem_map_size = efx_ef10_mem_map_size,
+ .probe = efx_ef10_probe_pf,
+ .remove = efx_ef10_remove,
+ .dimension_resources = efx_ef10_dimension_resources,
+ .init = efx_ef10_init_nic,
+ .fini = efx_ef10_fini_nic,
+ .map_reset_reason = efx_ef10_map_reset_reason,
+ .map_reset_flags = efx_ef10_map_reset_flags,
+ .reset = efx_ef10_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_fini_dmaq,
+ .prepare_flr = efx_ef10_prepare_flr,
+ .finish_flr = efx_port_dummy_op_void,
+ .describe_stats = efx_ef10_describe_stats,
+ .update_stats = efx_ef10_update_stats_pf,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+ .push_irq_moderation = efx_ef10_push_irq_moderation,
+ .reconfigure_mac = efx_ef10_mac_reconfigure,
+ .check_mac_fault = efx_mcdi_mac_check_fault,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .get_wol = efx_ef10_get_wol,
+ .set_wol = efx_ef10_set_wol,
+ .resume_wol = efx_port_dummy_op_void,
+ .get_fec_stats = efx_ef10_get_fec_stats,
+ .test_chip = efx_ef10_test_chip,
+ .test_nvram = efx_mcdi_nvram_test_all,
+ .mcdi_request = efx_ef10_mcdi_request,
+ .mcdi_poll_response = efx_ef10_mcdi_poll_response,
+ .mcdi_read_response = efx_ef10_mcdi_read_response,
+ .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef10_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .irq_handle_msi = efx_ef10_msi_interrupt,
+ .tx_probe = efx_ef10_tx_probe,
+ .tx_init = efx_ef10_tx_init,
+ .tx_write = efx_ef10_tx_write,
+ .tx_limit_len = efx_ef10_tx_limit_len,
+ .tx_enqueue = __efx_enqueue_skb,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
+ .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
+ .rx_write = efx_ef10_rx_write,
+ .rx_defer_refill = efx_ef10_rx_defer_refill,
+ .rx_packet = __efx_rx_packet,
+ .ev_probe = efx_mcdi_ev_probe,
+ .ev_init = efx_ef10_ev_init,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
+ .ev_process = efx_ef10_ev_process,
+ .ev_read_ack = efx_ef10_ev_read_ack,
+ .ev_test_generate = efx_ef10_ev_test_generate,
+ .filter_table_probe = efx_ef10_filter_table_probe,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = efx_ef10_filter_table_remove,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = efx_ef10_mtd_probe,
+ .mtd_rename = efx_mcdi_mtd_rename,
+ .mtd_read = efx_mcdi_mtd_read,
+ .mtd_erase = efx_mcdi_mtd_erase,
+ .mtd_write = efx_mcdi_mtd_write,
+ .mtd_sync = efx_mcdi_mtd_sync,
+#endif
+ .ptp_write_host_time = efx_ef10_ptp_write_host_time,
+ .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
+ .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+ .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
+ .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
+ .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports,
+ .udp_tnl_has_port = efx_ef10_udp_tnl_has_port,
+#ifdef CONFIG_SFC_SRIOV
+ /* currently set to the VF versions of these functions
+ * because SRIOV will be reimplemented later.
+ */
+ .vswitching_probe = efx_ef10_vswitching_probe_vf,
+ .vswitching_restore = efx_ef10_vswitching_restore_vf,
+ .vswitching_remove = efx_ef10_vswitching_remove_vf,
+#endif
+ .get_mac_address = efx_ef10_get_mac_address_pf,
+ .set_mac_address = efx_ef10_set_mac_address,
+ .tso_versions = efx_ef10_tso_versions,
+
+ .get_phys_port_id = efx_ef10_get_phys_port_id,
+ .revision = EFX_REV_X4,
+ .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
+ .can_rx_scatter = true,
+ .always_rx_scatter = true,
+ .option_descriptors = true,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
+ .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
+ .offload_features = EF10_OFFLOAD_FEATURES,
+ .mcdi_max_ver = 2,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
+ .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_ALL,
+ .check_caps = ef10_check_caps,
+ .print_additional_fwver = efx_ef10_print_additional_fwver,
+ .sensor_event = efx_mcdi_sensor_event,
+ .rx_recycle_ring_size = efx_ef10_recycle_ring_size,
+};
+
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 896ffca4aee2..5c2551369812 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -37,7 +37,6 @@ ef100_ethtool_get_ringparam(struct net_device *net_dev,
/* Ethtool options available
*/
const struct ethtool_ops ef100_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.get_drvinfo = efx_ethtool_get_drvinfo,
.get_msglevel = efx_ethtool_get_msglevel,
.set_msglevel = efx_ethtool_set_msglevel,
@@ -59,6 +58,7 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
+ .rxfh_per_ctx_key = true,
.rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
diff --git a/drivers/net/ethernet/sfc/ef100_rep.c b/drivers/net/ethernet/sfc/ef100_rep.c
index 0b3083ef0ead..e923e1796369 100644
--- a/drivers/net/ethernet/sfc/ef100_rep.c
+++ b/drivers/net/ethernet/sfc/ef100_rep.c
@@ -233,8 +233,8 @@ static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops;
net_dev->min_mtu = EFX_MIN_MTU;
net_dev->max_mtu = EFX_MAX_MTU;
- net_dev->features |= NETIF_F_LLTX;
- net_dev->hw_features |= NETIF_F_LLTX;
+ net_dev->lltx = true;
+
return efv;
fail1:
free_netdev(net_dev);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 6f1a01ded7d4..36b3b57e2055 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -821,6 +821,10 @@ static const struct pci_device_id efx_pci_table[] = {
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */
.driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0c03), /* X4 PF (FF/LL) */
+ .driver_data = (unsigned long)&efx_x4_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x2c03), /* X4 PF (FF only) */
+ .driver_data = (unsigned long)&efx_x4_nic_type},
{0} /* end of list */
};
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index c9e17a8208a9..f1723a6fb082 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -1260,7 +1260,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
spent = efx_process_channel(channel, budget);
- xdp_do_flush();
+ if (budget)
+ xdp_do_flush();
if (spent < budget) {
if (efx_channel_has_rx_queue(channel) &&
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 7c887160e2ef..bb1930818beb 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -230,17 +230,11 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
- /* Software capabilities */
- ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE);
- ts_info->phc_index = -1;
-
efx_ptp_get_ts_info(efx, ts_info);
return 0;
}
const struct ethtool_ops efx_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -268,6 +262,7 @@ const struct ethtool_ops efx_ethtool_ops = {
.set_rxnfc = efx_ethtool_set_rxnfc,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
+ .rxfh_per_ctx_key = true,
.rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 1db64fc6e909..9fa5c4c713ab 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -211,4 +211,6 @@ int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
extern const struct efx_nic_type efx_hunt_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
+extern const struct efx_nic_type efx_x4_nic_type;
+
#endif /* EFX_NIC_H */
diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h
index 466df5348b29..7ec4ac7b7ff5 100644
--- a/drivers/net/ethernet/sfc/nic_common.h
+++ b/drivers/net/ethernet/sfc/nic_common.h
@@ -21,6 +21,7 @@ enum {
*/
EFX_REV_HUNT_A0 = 4,
EFX_REV_EF100 = 5,
+ EFX_REV_X4 = 6,
};
static inline int efx_nic_rev(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 6fd2fdbaa418..aaacdcfa54ae 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -884,7 +884,7 @@ static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR);
timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR);
- timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND),
+ timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND);
timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
/* Ignore seconds */
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
index a7346e965bfe..d120b3c83ac0 100644
--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -1285,7 +1285,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
spent = efx_process_channel(channel, budget);
- xdp_do_flush();
+ if (budget)
+ xdp_do_flush();
if (spent < budget) {
if (efx_channel_has_rx_queue(channel) &&
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
index cf195162e270..a0966f879664 100644
--- a/drivers/net/ethernet/sfc/siena/efx_common.c
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -725,7 +725,6 @@ void efx_siena_reset_down(struct efx_nic *efx, enum reset_type method)
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
- mutex_lock(&efx->rss_lock);
efx->type->fini(efx);
}
@@ -786,9 +785,6 @@ int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
" VFs may not function\n", rc);
#endif
- if (efx->type->rx_restore_rss_contexts)
- efx->type->rx_restore_rss_contexts(efx);
- mutex_unlock(&efx->rss_lock);
efx->type->filter_table_restore(efx);
up_write(&efx->filter_sem);
if (efx->type->sriov_reset)
@@ -806,7 +802,6 @@ int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
fail:
efx->port_initialized = false;
- mutex_unlock(&efx->rss_lock);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
@@ -1016,9 +1011,7 @@ int efx_siena_init_struct(struct efx_nic *efx,
efx->type->rx_hash_offset - efx->type->rx_prefix_size;
efx->rx_packet_ts_offset =
efx->type->rx_ts_offset - efx->type->rx_prefix_size;
- INIT_LIST_HEAD(&efx->rss_context.list);
efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- mutex_init(&efx->rss_lock);
efx->vport_id = EVB_PORT_ID_ASSIGNED;
spin_lock_init(&efx->stats_lock);
efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
index 4c182d4edfc2..c5ad84db9613 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -230,17 +230,11 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
- /* Software capabilities */
- ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE);
- ts_info->phc_index = -1;
-
efx_siena_ptp_get_ts_info(efx, ts_info);
return 0;
}
const struct ethtool_ops efx_siena_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c
index 5f0a8127e967..075fef64de68 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c
@@ -820,27 +820,16 @@ int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
return 0;
case ETHTOOL_GRXFH: {
- struct efx_rss_context *ctx = &efx->rss_context;
__u64 data;
- mutex_lock(&efx->rss_lock);
- if (info->flow_type & FLOW_RSS && info->rss_context) {
- ctx = efx_siena_find_rss_context_entry(efx,
- info->rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- }
-
data = 0;
- if (!efx_rss_active(ctx)) /* No RSS */
- goto out_setdata_unlock;
+ if (!efx_rss_active(&efx->rss_context)) /* No RSS */
+ goto out_setdata;
- switch (info->flow_type & ~FLOW_RSS) {
+ switch (info->flow_type) {
case UDP_V4_FLOW:
case UDP_V6_FLOW:
- if (ctx->rx_hash_udp_4tuple)
+ if (efx->rss_context.rx_hash_udp_4tuple)
data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
RXH_IP_SRC | RXH_IP_DST);
else
@@ -862,10 +851,8 @@ int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
default:
break;
}
-out_setdata_unlock:
+out_setdata:
info->data = data;
-out_unlock:
- mutex_unlock(&efx->rss_lock);
return rc;
}
@@ -1164,47 +1151,12 @@ u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev)
return efx->type->rx_hash_key_size;
}
-static int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev,
- struct ethtool_rxfh_param *rxfh)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_rss_context *ctx;
- int rc = 0;
-
- if (!efx->type->rx_pull_rss_context_config)
- return -EOPNOTSUPP;
-
- mutex_lock(&efx->rss_lock);
- ctx = efx_siena_find_rss_context_entry(efx, rxfh->rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- rc = efx->type->rx_pull_rss_context_config(efx, ctx);
- if (rc)
- goto out_unlock;
-
- rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (rxfh->indir)
- memcpy(rxfh->indir, ctx->rx_indir_table,
- sizeof(ctx->rx_indir_table));
- if (rxfh->key)
- memcpy(rxfh->key, ctx->rx_hash_key,
- efx->type->rx_hash_key_size);
-out_unlock:
- mutex_unlock(&efx->rss_lock);
- return rc;
-}
-
int efx_siena_ethtool_get_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
- if (rxfh->rss_context)
- return efx_siena_ethtool_get_rxfh_context(net_dev, rxfh);
-
rc = efx->type->rx_pull_rss_config(efx);
if (rc)
return rc;
@@ -1219,70 +1171,6 @@ int efx_siena_ethtool_get_rxfh(struct net_device *net_dev,
return 0;
}
-static int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
- struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- u32 *rss_context = &rxfh->rss_context;
- struct efx_rss_context *ctx;
- u32 *indir = rxfh->indir;
- bool allocated = false;
- u8 *key = rxfh->key;
- int rc;
-
- if (!efx->type->rx_push_rss_context_config)
- return -EOPNOTSUPP;
-
- mutex_lock(&efx->rss_lock);
-
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- if (rxfh->rss_delete) {
- /* alloc + delete == Nothing to do */
- rc = -EINVAL;
- goto out_unlock;
- }
- ctx = efx_siena_alloc_rss_context_entry(efx);
- if (!ctx) {
- rc = -ENOMEM;
- goto out_unlock;
- }
- ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- /* Initialise indir table and key to defaults */
- efx_siena_set_default_rx_indir_table(efx, ctx);
- netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
- allocated = true;
- } else {
- ctx = efx_siena_find_rss_context_entry(efx, *rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- }
-
- if (rxfh->rss_delete) {
- /* delete this context */
- rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
- if (!rc)
- efx_siena_free_rss_context_entry(ctx);
- goto out_unlock;
- }
-
- if (!key)
- key = ctx->rx_hash_key;
- if (!indir)
- indir = ctx->rx_indir_table;
-
- rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key);
- if (rc && allocated)
- efx_siena_free_rss_context_entry(ctx);
- else
- *rss_context = ctx->user_id;
-out_unlock:
- mutex_unlock(&efx->rss_lock);
- return rc;
-}
-
int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
@@ -1296,9 +1184,6 @@ int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->rss_context)
- efx_siena_ethtool_set_rxfh_context(net_dev, rxfh, extack);
-
if (!indir && !key)
return 0;
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
index 94152f595acd..3fa7c652ae9b 100644
--- a/drivers/net/ethernet/sfc/siena/net_driver.h
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -707,20 +707,14 @@ struct vfdi_status;
/* The reserved RSS context value */
#define EFX_MCDI_RSS_CONTEXT_INVALID 0xffffffff
/**
- * struct efx_rss_context - A user-defined RSS context for filtering
- * @list: node of linked list on which this struct is stored
- * @context_id: the RSS_CONTEXT_ID returned by MC firmware, or
- * %EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC.
- * For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID.
- * @user_id: the rss_context ID exposed to userspace over ethtool.
+ * struct efx_rss_context - An RSS context for filtering
+ * @context_id: 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID.
* @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
* @rx_hash_key: Toeplitz hash key for this RSS context
* @indir_table: Indirection table for this RSS context
*/
struct efx_rss_context {
- struct list_head list;
u32 context_id;
- u32 user_id;
bool rx_hash_udp_4tuple;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
@@ -851,9 +845,7 @@ enum efx_xdp_tx_queues_mode {
* @rx_packet_ts_offset: Offset of timestamp from start of packet data
* (valid only if channel->sync_timestamps_enabled; always negative)
* @rx_scatter: Scatter mode enabled for receives
- * @rss_context: Main RSS context. Its @list member is the head of the list of
- * RSS contexts created by user requests
- * @rss_lock: Protects custom RSS context software state in @rss_context.list
+ * @rss_context: Main RSS context
* @vport_id: The function's vport ID, only relevant for PFs
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
@@ -1018,7 +1010,6 @@ struct efx_nic {
int rx_packet_ts_offset;
bool rx_scatter;
struct efx_rss_context rss_context;
- struct mutex rss_lock;
u32 vport_id;
unsigned int_error_count;
@@ -1220,10 +1211,6 @@ struct efx_udp_tunnel {
* @tx_enqueue: Add an SKB to TX queue
* @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
* @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC
- * @rx_push_rss_context_config: Write RSS hash key and indirection table for
- * user RSS context to the NIC
- * @rx_pull_rss_context_config: Read RSS hash key and indirection table for user
- * RSS context back from the NIC
* @rx_probe: Allocate resources for RX queue
* @rx_init: Initialise RX queue on the NIC
* @rx_remove: Free resources for RX queue
@@ -1366,13 +1353,6 @@ struct efx_nic_type {
int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
const u32 *rx_indir_table, const u8 *key);
int (*rx_pull_rss_config)(struct efx_nic *efx);
- int (*rx_push_rss_context_config)(struct efx_nic *efx,
- struct efx_rss_context *ctx,
- const u32 *rx_indir_table,
- const u8 *key);
- int (*rx_pull_rss_context_config)(struct efx_nic *efx,
- struct efx_rss_context *ctx);
- void (*rx_restore_rss_contexts)(struct efx_nic *efx);
int (*rx_probe)(struct efx_rx_queue *rx_queue);
void (*rx_init)(struct efx_rx_queue *rx_queue);
void (*rx_remove)(struct efx_rx_queue *rx_queue);
diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c
index c473a4b6dd44..85005196b4c5 100644
--- a/drivers/net/ethernet/sfc/siena/ptp.c
+++ b/drivers/net/ethernet/sfc/siena/ptp.c
@@ -897,7 +897,7 @@ static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR);
timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR);
- timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND),
+ timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND);
timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
/* Ignore seconds */
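
The one-character fix above replaces a trailing comma with a semicolon: the comma operator had fused the HOSTEND and WAITNS assignments into a single expression. Both assignments still executed, so behaviour was unchanged, but the statement was misleading. A standalone illustration (not driver code):

#include <stdio.h>

int main(void)
{
	int a, b;

	a = 1,		/* comma operator: evaluates left to right... */
	b = 2;		/* ...so both assignments still happen */

	printf("%d %d\n", a, b);	/* prints "1 2" either way */
	return 0;
}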
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
index 219fb358a646..082e35c6caaa 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.c
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -558,62 +558,6 @@ efx_siena_rx_packet_gro(struct efx_channel *channel,
napi_gro_frags(napi);
}
-/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
- * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
- */
-struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx)
-{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx, *new;
- u32 id = 1; /* Don't use zero, that refers to the master RSS context */
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- /* Search for first gap in the numbering */
- list_for_each_entry(ctx, head, list) {
- if (ctx->user_id != id)
- break;
- id++;
- /* Check for wrap. If this happens, we have nearly 2^32
- * allocated RSS contexts, which seems unlikely.
- */
- if (WARN_ON_ONCE(!id))
- return NULL;
- }
-
- /* Create the new entry */
- new = kmalloc(sizeof(*new), GFP_KERNEL);
- if (!new)
- return NULL;
- new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- new->rx_hash_udp_4tuple = false;
-
- /* Insert the new entry into the gap */
- new->user_id = id;
- list_add_tail(&new->list, &ctx->list);
- return new;
-}
-
-struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
- u32 id)
-{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- list_for_each_entry(ctx, head, list)
- if (ctx->user_id == id)
- return ctx;
- return NULL;
-}
-
-void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx)
-{
- list_del(&ctx->list);
- kfree(ctx);
-}
-
void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
struct efx_rss_context *ctx)
{
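
The removed allocator picked the first unused user_id by walking the id-sorted list. A standalone sketch of that first-gap search, with simplified types (illustration only, not the driver's structs):

#include <linux/list.h>

struct ctx {
	struct list_head list;	/* kept sorted by user_id */
	u32 user_id;
};

static u32 first_free_id(struct list_head *head)
{
	struct ctx *c;
	u32 id = 1;	/* 0 refers to the master RSS context */

	list_for_each_entry(c, head, list) {
		if (c->user_id != id)
			break;		/* gap found before this entry */
		if (!++id)		/* wrapped: ~2^32 contexts */
			return 0;
	}
	return id;	/* the new entry slots in here */
}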
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.h b/drivers/net/ethernet/sfc/siena/rx_common.h
index 6b37f83ecb30..f90a8320d396 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.h
+++ b/drivers/net/ethernet/sfc/siena/rx_common.h
@@ -78,10 +78,6 @@ efx_siena_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh, __wsum csum);
-struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx);
-struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
- u32 id);
-void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx);
void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
struct efx_rss_context *ctx);
diff --git a/drivers/net/ethernet/sfc/tc_counters.c b/drivers/net/ethernet/sfc/tc_counters.c
index c44088424323..a421b0123506 100644
--- a/drivers/net/ethernet/sfc/tc_counters.c
+++ b/drivers/net/ethernet/sfc/tc_counters.c
@@ -249,7 +249,7 @@ struct efx_tc_counter_index *efx_tc_flower_get_counter_index(
&ctr->linkage,
efx_tc_counter_id_ht_params);
kfree(ctr);
- return (void *)cnt; /* it's an ERR_PTR */
+ return ERR_CAST(cnt);
}
ctr->cnt = cnt;
refcount_set(&ctr->ref, 1);
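
ERR_CAST() (from <linux/err.h>) performs the same conversion as the removed '(void *)' cast but keeps the error-pointer intent visible to readers and static checkers. A minimal sketch of the pattern, with hypothetical types:

#include <linux/err.h>

struct foo;
struct bar;

static struct bar *bar_get(struct foo *f)
{
	if (IS_ERR(f))			/* f encodes a negative errno */
		return ERR_CAST(f);	/* re-type, keep the errno */

	return NULL;			/* real lookup elided */
}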
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 907498848028..a5e23e2da90f 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2355,7 +2355,7 @@ static int smc_drv_probe(struct platform_device *pdev)
* the resource supplies a trigger, override the irqflags with
* the trigger flags from the resource.
*/
- irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
+ irq_resflags = irq_get_trigger_type(ndev->irq);
if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
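
irq_get_trigger_type() is the <linux/irq.h> helper wrapping the open-coded lookup being removed; its shape is roughly the following, with a NULL check the open-coded form lacked:

static inline u32 irq_get_trigger_type(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	return d ? irqd_get_trigger_type(d) : 0;
}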
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 15cb96c2506d..f30d4b17c7fb 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -18,7 +18,7 @@
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "smsc9420.h"
#define DRV_NAME "smsc9420"
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index cd36ff4da68c..684489156dce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -29,6 +29,7 @@
/* Synopsys Core versions */
#define DWMAC_CORE_3_40 0x34
#define DWMAC_CORE_3_50 0x35
+#define DWMAC_CORE_3_70 0x37
#define DWMAC_CORE_4_00 0x40
#define DWMAC_CORE_4_10 0x41
#define DWMAC_CORE_5_00 0x50
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index 9e40c28d453a..bfe6e2d631bd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -8,15 +8,90 @@
#include <linux/device.h>
#include <linux/of_irq.h>
#include "stmmac.h"
+#include "dwmac_dma.h"
+#include "dwmac1000.h"
+
+/* Normal Loongson Tx Summary */
+#define DMA_INTR_ENA_NIE_TX_LOONGSON 0x00040000
+/* Normal Loongson Rx Summary */
+#define DMA_INTR_ENA_NIE_RX_LOONGSON 0x00020000
+
+#define DMA_INTR_NORMAL_LOONGSON (DMA_INTR_ENA_NIE_TX_LOONGSON | \
+ DMA_INTR_ENA_NIE_RX_LOONGSON | \
+ DMA_INTR_ENA_RIE | DMA_INTR_ENA_TIE)
+
+/* Abnormal Loongson Tx Summary */
+#define DMA_INTR_ENA_AIE_TX_LOONGSON 0x00010000
+/* Abnormal Loongson Rx Summary */
+#define DMA_INTR_ENA_AIE_RX_LOONGSON 0x00008000
+
+#define DMA_INTR_ABNORMAL_LOONGSON (DMA_INTR_ENA_AIE_TX_LOONGSON | \
+ DMA_INTR_ENA_AIE_RX_LOONGSON | \
+ DMA_INTR_ENA_FBE | DMA_INTR_ENA_UNE)
+
+#define DMA_INTR_DEFAULT_MASK_LOONGSON (DMA_INTR_NORMAL_LOONGSON | \
+ DMA_INTR_ABNORMAL_LOONGSON)
+
+/* Normal Loongson Tx Interrupt Summary */
+#define DMA_STATUS_NIS_TX_LOONGSON 0x00040000
+/* Normal Loongson Rx Interrupt Summary */
+#define DMA_STATUS_NIS_RX_LOONGSON 0x00020000
+
+/* Abnormal Loongson Tx Interrupt Summary */
+#define DMA_STATUS_AIS_TX_LOONGSON 0x00010000
+/* Abnormal Loongson Rx Interrupt Summary */
+#define DMA_STATUS_AIS_RX_LOONGSON 0x00008000
+
+/* Fatal Loongson Tx Bus Error Interrupt */
+#define DMA_STATUS_FBI_TX_LOONGSON 0x00002000
+/* Fatal Loongson Rx Bus Error Interrupt */
+#define DMA_STATUS_FBI_RX_LOONGSON 0x00001000
+
+#define DMA_STATUS_MSK_COMMON_LOONGSON (DMA_STATUS_NIS_TX_LOONGSON | \
+ DMA_STATUS_NIS_RX_LOONGSON | \
+ DMA_STATUS_AIS_TX_LOONGSON | \
+ DMA_STATUS_AIS_RX_LOONGSON | \
+ DMA_STATUS_FBI_TX_LOONGSON | \
+ DMA_STATUS_FBI_RX_LOONGSON)
+
+#define DMA_STATUS_MSK_RX_LOONGSON (DMA_STATUS_ERI | DMA_STATUS_RWT | \
+ DMA_STATUS_RPS | DMA_STATUS_RU | \
+ DMA_STATUS_RI | DMA_STATUS_OVF | \
+ DMA_STATUS_MSK_COMMON_LOONGSON)
+
+#define DMA_STATUS_MSK_TX_LOONGSON (DMA_STATUS_ETI | DMA_STATUS_UNF | \
+ DMA_STATUS_TJT | DMA_STATUS_TU | \
+ DMA_STATUS_TPS | DMA_STATUS_TI | \
+ DMA_STATUS_MSK_COMMON_LOONGSON)
+
+#define PCI_DEVICE_ID_LOONGSON_GMAC 0x7a03
+#define PCI_DEVICE_ID_LOONGSON_GNET 0x7a13
+#define DWMAC_CORE_LS_MULTICHAN 0x10 /* Loongson custom ID */
+#define CHANNEL_NUM 8
+
+struct loongson_data {
+ u32 loongson_id;
+ struct device *dev;
+};
+
+struct stmmac_pci_info {
+ int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
+};
-static int loongson_default_data(struct plat_stmmacenet_data *plat)
+static void loongson_default_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
{
+ /* Get bus_id; this can be overwritten later */
+ plat->bus_id = pci_dev_id(pdev);
+
plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
plat->has_gmac = 1;
plat->force_sf_dma_mode = 1;
/* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
+ plat->multicast_filter_bins = 256;
+
+ plat->mac_interface = PHY_INTERFACE_MODE_NA;
/* Set default value for unicast filter entries */
plat->unicast_filter_entries = 1;
@@ -24,10 +99,6 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat)
/* Set the maxmtu to a default of JUMBO_LEN */
plat->maxmtu = JUMBO_LEN;
- /* Set default number of RX and TX queues to use */
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
-
/* Disable Priority config by default */
plat->tx_queues_cfg[0].use_prio = false;
plat->rx_queues_cfg[0].use_prio = false;
@@ -35,30 +106,424 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat)
/* Disable RX queues routing by default */
plat->rx_queues_cfg[0].pkt_route = 0x0;
+ plat->clk_ref_rate = 125000000;
+ plat->clk_ptp_rate = 125000000;
+
/* Default to phy auto-detection */
plat->phy_addr = -1;
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
+}
+
+static int loongson_gmac_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct loongson_data *ld;
+ int i;
+
+ ld = plat->bsp_priv;
+
+ loongson_default_data(pdev, plat);
+
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
+ plat->rx_queues_to_use = CHANNEL_NUM;
+ plat->tx_queues_to_use = CHANNEL_NUM;
+
+ /* Only channel 0 supports checksum,
+ * so turn off checksum to enable multiple channels.
+ */
+ for (i = 1; i < CHANNEL_NUM; i++)
+ plat->tx_queues_cfg[i].coe_unsupported = 1;
+ } else {
+ plat->tx_queues_to_use = 1;
+ plat->rx_queues_to_use = 1;
+ }
+
+ plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
- plat->multicast_filter_bins = 256;
return 0;
}
-static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static struct stmmac_pci_info loongson_gmac_pci_info = {
+ .setup = loongson_gmac_data,
+};
+
+static void loongson_gnet_fix_speed(void *priv, unsigned int speed,
+ unsigned int mode)
{
- struct plat_stmmacenet_data *plat;
- struct stmmac_resources res;
- struct device_node *np;
- int ret, i, phy_mode;
+ struct loongson_data *ld = (struct loongson_data *)priv;
+ struct net_device *ndev = dev_get_drvdata(ld->dev);
+ struct stmmac_priv *ptr = netdev_priv(ndev);
+
+ /* The integrated PHY has a quirk when switching from the low
+ * speeds to 1000Mbps mode: speeding back up requires a PHY-link
+ * re-negotiation.
+ */
+ if (speed == SPEED_1000) {
+ if (readl(ptr->ioaddr + MAC_CTRL_REG) &
+ GMAC_CONTROL_PS)
+ /* Work around hardware bug, restart autoneg */
+ phy_restart_aneg(ndev->phydev);
+ }
+}
- np = dev_of_node(&pdev->dev);
+static int loongson_gnet_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct loongson_data *ld;
+ int i;
- if (!np) {
- pr_info("dwmac_loongson_pci: No OF node\n");
- return -ENODEV;
+ ld = plat->bsp_priv;
+
+ loongson_default_data(pdev, plat);
+
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
+ plat->rx_queues_to_use = CHANNEL_NUM;
+ plat->tx_queues_to_use = CHANNEL_NUM;
+
+ /* Only channel 0 supports checksum,
+ * so turn off checksum to enable multiple channels.
+ */
+ for (i = 1; i < CHANNEL_NUM; i++)
+ plat->tx_queues_cfg[i].coe_unsupported = 1;
+ } else {
+ plat->tx_queues_to_use = 1;
+ plat->rx_queues_to_use = 1;
}
+ plat->phy_interface = PHY_INTERFACE_MODE_GMII;
+ plat->mdio_bus_data->phy_mask = ~(u32)BIT(2);
+ plat->fix_mac_speed = loongson_gnet_fix_speed;
+
+ return 0;
+}
+
+static struct stmmac_pci_info loongson_gnet_pci_info = {
+ .setup = loongson_gnet_data,
+};
+
+static void loongson_dwmac_dma_init_channel(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 chan)
+{
+ int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+ int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+ u32 value;
+
+ value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan));
+
+ if (dma_cfg->pblx8)
+ value |= DMA_BUS_MODE_MAXPBL;
+
+ value |= DMA_BUS_MODE_USP;
+ value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
+ value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
+ value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+
+ /* Set the Fixed burst mode */
+ if (dma_cfg->fixed_burst)
+ value |= DMA_BUS_MODE_FB;
+
+ /* Mixed Burst has no effect when fb is set */
+ if (dma_cfg->mixed_burst)
+ value |= DMA_BUS_MODE_MB;
+
+ if (dma_cfg->atds)
+ value |= DMA_BUS_MODE_ATDS;
+
+ if (dma_cfg->aal)
+ value |= DMA_BUS_MODE_AAL;
+
+ writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan));
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK_LOONGSON, ioaddr +
+ DMA_CHAN_INTR_ENA(chan));
+}
+
+static int loongson_dwmac_dma_interrupt(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
+ struct stmmac_extra_stats *x,
+ u32 chan, u32 dir)
+{
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
+ u32 abnor_intr_status;
+ u32 nor_intr_status;
+ u32 fb_intr_status;
+ u32 intr_status;
+ int ret = 0;
+
+ /* read the status register (CSR5) */
+ intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
+
+ if (dir == DMA_DIR_RX)
+ intr_status &= DMA_STATUS_MSK_RX_LOONGSON;
+ else if (dir == DMA_DIR_TX)
+ intr_status &= DMA_STATUS_MSK_TX_LOONGSON;
+
+ nor_intr_status = intr_status & (DMA_STATUS_NIS_TX_LOONGSON |
+ DMA_STATUS_NIS_RX_LOONGSON);
+ abnor_intr_status = intr_status & (DMA_STATUS_AIS_TX_LOONGSON |
+ DMA_STATUS_AIS_RX_LOONGSON);
+ fb_intr_status = intr_status & (DMA_STATUS_FBI_TX_LOONGSON |
+ DMA_STATUS_FBI_RX_LOONGSON);
+
+ /* ABNORMAL interrupts */
+ if (unlikely(abnor_intr_status)) {
+ if (unlikely(intr_status & DMA_STATUS_UNF)) {
+ ret = tx_hard_error_bump_tc;
+ x->tx_undeflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TJT))
+ x->tx_jabber_irq++;
+ if (unlikely(intr_status & DMA_STATUS_OVF))
+ x->rx_overflow_irq++;
+ if (unlikely(intr_status & DMA_STATUS_RU))
+ x->rx_buf_unav_irq++;
+ if (unlikely(intr_status & DMA_STATUS_RPS))
+ x->rx_process_stopped_irq++;
+ if (unlikely(intr_status & DMA_STATUS_RWT))
+ x->rx_watchdog_irq++;
+ if (unlikely(intr_status & DMA_STATUS_ETI))
+ x->tx_early_irq++;
+ if (unlikely(intr_status & DMA_STATUS_TPS)) {
+ x->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(fb_intr_status)) {
+ x->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (likely(nor_intr_status)) {
+ if (likely(intr_status & DMA_STATUS_RI)) {
+ u32 value = readl(ioaddr + DMA_INTR_ENA);
+ /* Only schedule NAPI on a real RIE event. */
+ if (likely(value & DMA_INTR_ENA_RIE)) {
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
+ ret |= handle_rx;
+ }
+ }
+ if (likely(intr_status & DMA_STATUS_TI)) {
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
+ ret |= handle_tx;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ERI))
+ x->rx_early_irq++;
+ }
+ /* Optional hardware blocks, interrupts should be disabled */
+ if (unlikely(intr_status &
+ (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+ pr_warn("%s: unexpected status %08x\n", __func__, intr_status);
+
+ /* Clear the interrupt by writing a logic 1 to CSR5[19:0] */
+ writel((intr_status & 0x7ffff), ioaddr + DMA_CHAN_STATUS(chan));
+
+ return ret;
+}
+
+static struct mac_device_info *loongson_dwmac_setup(void *apriv)
+{
+ struct stmmac_priv *priv = apriv;
+ struct mac_device_info *mac;
+ struct stmmac_dma_ops *dma;
+ struct loongson_data *ld;
+ struct pci_dev *pdev;
+
+ ld = priv->plat->bsp_priv;
+ pdev = to_pci_dev(priv->device);
+
+ mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
+ if (!mac)
+ return NULL;
+
+ dma = devm_kzalloc(priv->device, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return NULL;
+
+ /* The Loongson GMAC and GNET devices are based on the DW GMAC
+ * v3.50a and v3.73a IP cores, but the HW designers changed the
+ * GMAC_VERSION.SNPSVER field to the custom value 0x10 on the
+ * controllers with the multi-channel feature to emphasize the
+ * differences: multiple DMA channels, the AV feature, and the
+ * GMAC_INT_STATUS CSR flags layout. Restore the original value
+ * so that the correct HW interface is selected.
+ */
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
+ priv->synopsys_id = DWMAC_CORE_3_70;
+ *dma = dwmac1000_dma_ops;
+ dma->init_chan = loongson_dwmac_dma_init_channel;
+ dma->dma_interrupt = loongson_dwmac_dma_interrupt;
+ mac->dma = dma;
+ }
+
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Pre-initialize the respective "mac" fields as it's done in
+ * dwmac1000_setup()
+ */
+ mac->pcsr = priv->ioaddr;
+ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
+ mac->mcast_bits_log2 = 0;
+
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
+ /* The Loongson GMAC doesn't support flow control. The LS2K2000
+ * GNET doesn't support half-duplex link modes.
+ */
+ if (pdev->device == PCI_DEVICE_ID_LOONGSON_GMAC) {
+ mac->link.caps = MAC_10 | MAC_100 | MAC_1000;
+ } else {
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+ else
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10FD | MAC_100FD | MAC_1000FD;
+ }
+
+ mac->link.duplex = GMAC_CONTROL_DM;
+ mac->link.speed10 = GMAC_CONTROL_PS;
+ mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
+ mac->link.speed1000 = 0;
+ mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
+ mac->mii.addr = GMAC_MII_ADDR;
+ mac->mii.data = GMAC_MII_DATA;
+ mac->mii.addr_shift = 11;
+ mac->mii.addr_mask = 0x0000F800;
+ mac->mii.reg_shift = 6;
+ mac->mii.reg_mask = 0x000007C0;
+ mac->mii.clk_csr_shift = 2;
+ mac->mii.clk_csr_mask = GENMASK(5, 2);
+
+ return mac;
+}
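
The SNPSVER remap above pairs with the probe-time read further down. A condensed sketch of the flow, assuming GMAC_VERSION is the DW GMAC version CSR with the core ID in its low byte:

/* probe: sample the Synopsys core ID */
ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;

/* setup: 0x10 is Loongson's custom multi-channel ID; map it back
 * to the real 3.70 core before the HW interface is selected.
 */
if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)	/* 0x10 */
	priv->synopsys_id = DWMAC_CORE_3_70;	/* 0x37 */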
+
+static int loongson_dwmac_msi_config(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct stmmac_resources *res)
+{
+ int i, ret, vecs;
+
+ vecs = roundup_pow_of_two(CHANNEL_NUM * 2 + 1);
+ ret = pci_alloc_irq_vectors(pdev, vecs, vecs, PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_warn(&pdev->dev, "Failed to allocate MSI IRQs\n");
+ return ret;
+ }
+
+ res->irq = pci_irq_vector(pdev, 0);
+
+ for (i = 0; i < plat->rx_queues_to_use; i++) {
+ res->rx_irq[CHANNEL_NUM - 1 - i] =
+ pci_irq_vector(pdev, 1 + i * 2);
+ }
+
+ for (i = 0; i < plat->tx_queues_to_use; i++) {
+ res->tx_irq[CHANNEL_NUM - 1 - i] =
+ pci_irq_vector(pdev, 2 + i * 2);
+ }
+
+ plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
+
+ return 0;
+}
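
For reference, the vector layout the two loops above assume, with CHANNEL_NUM == 8 (17 vectors used, rounded up to 32 by roundup_pow_of_two()):

/*
 * vector 0        -> res->irq (common MAC interrupt)
 * vector 1 + 2*i  -> res->rx_irq[CHANNEL_NUM - 1 - i]
 * vector 2 + 2*i  -> res->tx_irq[CHANNEL_NUM - 1 - i]
 *
 * i == 0 fills rx_irq[7]/tx_irq[7] from vectors 1/2;
 * i == 7 fills rx_irq[0]/tx_irq[0] from vectors 15/16.
 */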
+
+static void loongson_dwmac_msi_clear(struct pci_dev *pdev)
+{
+ pci_free_irq_vectors(pdev);
+}
+
+static int loongson_dwmac_dt_config(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct stmmac_resources *res)
+{
+ struct device_node *np = dev_of_node(&pdev->dev);
+ int ret;
+
+ plat->mdio_node = of_get_child_by_name(np, "mdio");
+ if (plat->mdio_node) {
+ dev_info(&pdev->dev, "Found MDIO subnode\n");
+ plat->mdio_bus_data->needs_reset = true;
+ }
+
+ ret = of_alias_get_id(np, "ethernet");
+ if (ret >= 0)
+ plat->bus_id = ret;
+
+ res->irq = of_irq_get_byname(np, "macirq");
+ if (res->irq < 0) {
+ dev_err(&pdev->dev, "IRQ macirq not found\n");
+ ret = -ENODEV;
+ goto err_put_node;
+ }
+
+ res->wol_irq = of_irq_get_byname(np, "eth_wake_irq");
+ if (res->wol_irq < 0) {
+ dev_info(&pdev->dev,
+ "IRQ eth_wake_irq not found, using macirq\n");
+ res->wol_irq = res->irq;
+ }
+
+ res->lpi_irq = of_irq_get_byname(np, "eth_lpi");
+ if (res->lpi_irq < 0) {
+ dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
+ ret = -ENODEV;
+ goto err_put_node;
+ }
+
+ ret = device_get_phy_mode(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "phy_mode not found\n");
+ ret = -ENODEV;
+ goto err_put_node;
+ }
+
+ plat->phy_interface = ret;
+
+ return 0;
+
+err_put_node:
+ of_node_put(plat->mdio_node);
+
+ return ret;
+}
+
+static void loongson_dwmac_dt_clear(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ of_node_put(plat->mdio_node);
+}
+
+static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct stmmac_resources *res)
+{
+ if (!pdev->irq)
+ return -EINVAL;
+
+ res->irq = pdev->irq;
+
+ return 0;
+}
+
+static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct plat_stmmacenet_data *plat;
+ struct stmmac_pci_info *info;
+ struct stmmac_resources res;
+ struct loongson_data *ld;
+ int ret, i;
+
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
return -ENOMEM;
@@ -69,25 +534,23 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (!plat->mdio_bus_data)
return -ENOMEM;
- plat->mdio_node = of_get_child_by_name(np, "mdio");
- if (plat->mdio_node) {
- dev_info(&pdev->dev, "Found MDIO subnode\n");
- plat->mdio_bus_data->needs_reset = true;
- }
-
plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL);
- if (!plat->dma_cfg) {
- ret = -ENOMEM;
- goto err_put_node;
- }
+ if (!plat->dma_cfg)
+ return -ENOMEM;
+
+ ld = devm_kzalloc(&pdev->dev, sizeof(*ld), GFP_KERNEL);
+ if (!ld)
+ return -ENOMEM;
/* Enable pci device */
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__);
- goto err_put_node;
+ return ret;
}
+ pci_set_master(pdev);
+
/* Get the base address of device */
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
@@ -98,59 +561,43 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
break;
}
- plat->bus_id = of_alias_get_id(np, "ethernet");
- if (plat->bus_id < 0)
- plat->bus_id = pci_dev_id(pdev);
-
- phy_mode = device_get_phy_mode(&pdev->dev);
- if (phy_mode < 0) {
- dev_err(&pdev->dev, "phy_mode not found\n");
- ret = phy_mode;
- goto err_disable_device;
- }
-
- plat->phy_interface = phy_mode;
- plat->mac_interface = PHY_INTERFACE_MODE_GMII;
-
- pci_set_master(pdev);
-
- loongson_default_data(plat);
- pci_enable_msi(pdev);
memset(&res, 0, sizeof(res));
res.addr = pcim_iomap_table(pdev)[0];
- res.irq = of_irq_get_byname(np, "macirq");
- if (res.irq < 0) {
- dev_err(&pdev->dev, "IRQ macirq not found\n");
- ret = -ENODEV;
- goto err_disable_msi;
- }
+ plat->bsp_priv = ld;
+ plat->setup = loongson_dwmac_setup;
+ ld->dev = &pdev->dev;
+ ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;
- res.wol_irq = of_irq_get_byname(np, "eth_wake_irq");
- if (res.wol_irq < 0) {
- dev_info(&pdev->dev, "IRQ eth_wake_irq not found, using macirq\n");
- res.wol_irq = res.irq;
- }
+ info = (struct stmmac_pci_info *)id->driver_data;
+ ret = info->setup(pdev, plat);
+ if (ret)
+ goto err_disable_device;
- res.lpi_irq = of_irq_get_byname(np, "eth_lpi");
- if (res.lpi_irq < 0) {
- dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
- ret = -ENODEV;
- goto err_disable_msi;
- }
+ if (dev_of_node(&pdev->dev))
+ ret = loongson_dwmac_dt_config(pdev, plat, &res);
+ else
+ ret = loongson_dwmac_acpi_config(pdev, plat, &res);
+ if (ret)
+ goto err_disable_device;
+
+ /* Use the common MAC IRQ if per-channel MSI allocation fails */
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ loongson_dwmac_msi_config(pdev, plat, &res);
ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
if (ret)
- goto err_disable_msi;
+ goto err_plat_clear;
- return ret;
+ return 0;
-err_disable_msi:
- pci_disable_msi(pdev);
+err_plat_clear:
+ if (dev_of_node(&pdev->dev))
+ loongson_dwmac_dt_clear(pdev, plat);
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ loongson_dwmac_msi_clear(pdev);
err_disable_device:
pci_disable_device(pdev);
-err_put_node:
- of_node_put(plat->mdio_node);
return ret;
}
@@ -158,11 +605,18 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
{
struct net_device *ndev = dev_get_drvdata(&pdev->dev);
struct stmmac_priv *priv = netdev_priv(ndev);
+ struct loongson_data *ld;
int i;
- of_node_put(priv->plat->mdio_node);
+ ld = priv->plat->bsp_priv;
stmmac_dvr_remove(&pdev->dev);
+ if (dev_of_node(&pdev->dev))
+ loongson_dwmac_dt_clear(pdev, priv->plat);
+
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ loongson_dwmac_msi_clear(pdev);
+
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
@@ -170,7 +624,6 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
break;
}
- pci_disable_msi(pdev);
pci_disable_device(pdev);
}
@@ -213,7 +666,8 @@ static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend,
loongson_dwmac_resume);
static const struct pci_device_id loongson_dwmac_id_table[] = {
- { PCI_VDEVICE(LOONGSON, 0x7a03) },
+ { PCI_DEVICE_DATA(LOONGSON, GMAC, &loongson_gmac_pci_info) },
+ { PCI_DEVICE_DATA(LOONGSON, GNET, &loongson_gnet_pci_info) },
{}
};
MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
@@ -232,4 +686,5 @@ module_pci_driver(loongson_dwmac_driver);
MODULE_DESCRIPTION("Loongson DWMAC PCI driver");
MODULE_AUTHOR("Qing Zhang <zhangqing@loongson.cn>");
+MODULE_AUTHOR("Yanteng Si <siyanteng@loongson.cn>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 7ae04d8d291c..50073bdade46 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1116,6 +1116,161 @@ static const struct rk_gmac_ops rk3568_ops = {
},
};
+/* VCCIO0_1_3_IOC */
+#define RK3576_VCCIO0_1_3_IOC_CON2 0X6408
+#define RK3576_VCCIO0_1_3_IOC_CON3 0X640c
+#define RK3576_VCCIO0_1_3_IOC_CON4 0X6410
+#define RK3576_VCCIO0_1_3_IOC_CON5 0X6414
+
+#define RK3576_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3576_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3576_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
+#define RK3576_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
+
+#define RK3576_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RK3576_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+/* SDGMAC_GRF */
+#define RK3576_GRF_GMAC_CON0 0X0020
+#define RK3576_GRF_GMAC_CON1 0X0024
+
+#define RK3576_GMAC_RMII_MODE GRF_BIT(3)
+#define RK3576_GMAC_RGMII_MODE GRF_CLR_BIT(3)
+
+#define RK3576_GMAC_CLK_SELECT_IO GRF_BIT(7)
+#define RK3576_GMAC_CLK_SELECT_CRU GRF_CLR_BIT(7)
+
+#define RK3576_GMAC_CLK_RMII_DIV2 GRF_BIT(5)
+#define RK3576_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(5)
+
+#define RK3576_GMAC_CLK_RGMII_DIV1 \
+ (GRF_CLR_BIT(6) | GRF_CLR_BIT(5))
+#define RK3576_GMAC_CLK_RGMII_DIV5 \
+ (GRF_BIT(6) | GRF_BIT(5))
+#define RK3576_GMAC_CLK_RGMII_DIV50 \
+ (GRF_BIT(6) | GRF_CLR_BIT(5))
+
+#define RK3576_GMAC_CLK_RMII_GATE GRF_BIT(4)
+#define RK3576_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(4)
+
+static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int offset_con;
+
+ if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
+ dev_err(dev, "Missing rockchip,grf or rockchip,php-grf property\n");
+ return;
+ }
+
+ offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RGMII_MODE);
+
+ offset_con = bsp_priv->id == 1 ? RK3576_VCCIO0_1_3_IOC_CON4 :
+ RK3576_VCCIO0_1_3_IOC_CON2;
+
+ /* m0 && m1 delay enabled */
+ regmap_write(bsp_priv->php_grf, offset_con,
+ DELAY_ENABLE(RK3576, tx_delay, rx_delay));
+ regmap_write(bsp_priv->php_grf, offset_con + 0x4,
+ DELAY_ENABLE(RK3576, tx_delay, rx_delay));
+
+ /* m0 && m1 delay value */
+ regmap_write(bsp_priv->php_grf, offset_con,
+ RK3576_GMAC_CLK_TX_DL_CFG(tx_delay) |
+ RK3576_GMAC_CLK_RX_DL_CFG(rx_delay));
+ regmap_write(bsp_priv->php_grf, offset_con + 0x4,
+ RK3576_GMAC_CLK_TX_DL_CFG(tx_delay) |
+ RK3576_GMAC_CLK_RX_DL_CFG(rx_delay));
+}
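
The GRF_BIT()/GRF_CLR_BIT()/HIWORD_UPDATE() helpers used above follow the Rockchip GRF convention: the upper 16 bits of each 32-bit write are a per-bit write-enable mask, so selected bits can be updated without a read-modify-write. A sketch of the helpers as defined earlier in dwmac-rk.c:

#define HIWORD_UPDATE(val, mask, shift) \
	(((val) << (shift)) | ((mask) << ((shift) + 16)))

#define GRF_BIT(nr)	(BIT(nr) | BIT((nr) + 16))	/* set bit nr */
#define GRF_CLR_BIT(nr)	BIT((nr) + 16)			/* clear bit nr */

/* e.g. RK3576_GMAC_CLK_TX_DL_CFG(0x20) writes delay 0x20 into bits
 * 6:0 and raises the matching write-enables in bits 22:16.
 */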
+
+static void rk3576_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int offset_con;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RMII_MODE);
+}
+
+static void rk3576_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int val = 0, offset_con;
+
+ switch (speed) {
+ case 10:
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ val = RK3576_GMAC_CLK_RMII_DIV20;
+ else
+ val = RK3576_GMAC_CLK_RGMII_DIV50;
+ break;
+ case 100:
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ val = RK3576_GMAC_CLK_RMII_DIV2;
+ else
+ val = RK3576_GMAC_CLK_RGMII_DIV5;
+ break;
+ case 1000:
+ if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII)
+ val = RK3576_GMAC_CLK_RGMII_DIV1;
+ else
+ goto err;
+ break;
+ default:
+ goto err;
+ }
+
+ offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(bsp_priv->grf, offset_con, val);
+
+ return;
+err:
+ dev_err(dev, "unknown speed value for GMAC speed=%d\n", speed);
+}
+
+static void rk3576_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
+ bool enable)
+{
+ unsigned int val = input ? RK3576_GMAC_CLK_SELECT_IO :
+ RK3576_GMAC_CLK_SELECT_CRU;
+ unsigned int offset_con;
+
+ val |= enable ? RK3576_GMAC_CLK_RMII_NOGATE :
+ RK3576_GMAC_CLK_RMII_GATE;
+
+ offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(bsp_priv->grf, offset_con, val);
+}
+
+static const struct rk_gmac_ops rk3576_ops = {
+ .set_to_rgmii = rk3576_set_to_rgmii,
+ .set_to_rmii = rk3576_set_to_rmii,
+ .set_rgmii_speed = rk3576_set_gmac_speed,
+ .set_rmii_speed = rk3576_set_gmac_speed,
+ .set_clock_selection = rk3576_set_clock_selection,
+ .regs_valid = true,
+ .regs = {
+ 0x2a220000, /* gmac0 */
+ 0x2a230000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
+};
+
/* sys_grf */
#define RK3588_GRF_GMAC_CON7 0X031c
#define RK3588_GRF_GMAC_CON8 0X0320
@@ -1141,8 +1296,8 @@ static const struct rk_gmac_ops rk3568_ops = {
#define RK3588_GMAC_CLK_RMII_MODE(id) GRF_BIT(5 * (id))
#define RK3588_GMAC_CLK_RGMII_MODE(id) GRF_CLR_BIT(5 * (id))
-#define RK3588_GMAC_CLK_SELET_CRU(id) GRF_BIT(5 * (id) + 4)
-#define RK3588_GMAC_CLK_SELET_IO(id) GRF_CLR_BIT(5 * (id) + 4)
+#define RK3588_GMAC_CLK_SELECT_CRU(id) GRF_BIT(5 * (id) + 4)
+#define RK3588_GMAC_CLK_SELECT_IO(id) GRF_CLR_BIT(5 * (id) + 4)
#define RK3588_GMA_CLK_RMII_DIV2(id) GRF_BIT(5 * (id) + 2)
#define RK3588_GMA_CLK_RMII_DIV20(id) GRF_CLR_BIT(5 * (id) + 2)
@@ -1240,8 +1395,8 @@ err:
static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
bool enable)
{
- unsigned int val = input ? RK3588_GMAC_CLK_SELET_IO(bsp_priv->id) :
- RK3588_GMAC_CLK_SELET_CRU(bsp_priv->id);
+ unsigned int val = input ? RK3588_GMAC_CLK_SELECT_IO(bsp_priv->id) :
+ RK3588_GMAC_CLK_SELECT_CRU(bsp_priv->id);
val |= enable ? RK3588_GMAC_CLK_RMII_NOGATE(bsp_priv->id) :
RK3588_GMAC_CLK_RMII_GATE(bsp_priv->id);
@@ -1908,6 +2063,7 @@ static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
{ .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops },
+ { .compatible = "rockchip,rk3576-gmac", .data = &rk3576_ops },
{ .compatible = "rockchip,rk3588-gmac", .data = &rk3588_ops },
{ .compatible = "rockchip,rv1108-gmac", .data = &rv1108_ops },
{ .compatible = "rockchip,rv1126-gmac", .data = &rv1126_ops },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index e1b761dcfa1d..4a0ae92b3055 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -299,7 +299,7 @@ static int sun8i_dwmac_dma_reset(void __iomem *ioaddr)
* Called from stmmac via stmmac_dma_ops->init
*/
static void sun8i_dwmac_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+ struct stmmac_dma_cfg *dma_cfg)
{
writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
@@ -395,7 +395,7 @@ static void sun8i_dwmac_dma_start_tx(struct stmmac_priv *priv,
writel(v, ioaddr + EMAC_TX_CTL1);
}
-static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr)
+static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan)
{
u32 v;
@@ -774,8 +774,8 @@ static int sun8i_dwmac_reset(struct stmmac_priv *priv)
static int get_ephy_nodes(struct stmmac_priv *priv)
{
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
- struct device_node *mdio_mux, *iphynode;
struct device_node *mdio_internal;
+ struct device_node *mdio_mux;
int ret;
mdio_mux = of_get_child_by_name(priv->device->of_node, "mdio-mux");
@@ -793,7 +793,7 @@ static int get_ephy_nodes(struct stmmac_priv *priv)
}
/* Seek for internal PHY */
- for_each_child_of_node(mdio_internal, iphynode) {
+ for_each_child_of_node_scoped(mdio_internal, iphynode) {
gmac->ephy_clk = of_clk_get(iphynode, 0);
if (IS_ERR(gmac->ephy_clk))
continue;
@@ -801,14 +801,12 @@ static int get_ephy_nodes(struct stmmac_priv *priv)
if (IS_ERR(gmac->rst_ephy)) {
ret = PTR_ERR(gmac->rst_ephy);
if (ret == -EPROBE_DEFER) {
- of_node_put(iphynode);
of_node_put(mdio_internal);
return ret;
}
continue;
}
dev_info(priv->device, "Found internal PHY node\n");
- of_node_put(iphynode);
of_node_put(mdio_internal);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
index 362f85136c3e..6fdd94c8919e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
@@ -127,10 +127,12 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_
value &= ~XPCS_WRAP_UPHY_RX_CONTROL_AUX_RX_IDDQ;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ usleep_range(10, 20); /* 50ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_SLEEP;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ usleep_range(10, 20); /* 500ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CAL_EN;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
@@ -143,22 +145,30 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_
return err;
}
+ usleep_range(10, 20); /* 50ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_DATA_EN;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
- value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
+ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ usleep_range(10, 20); /* 50ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
- value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
+ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ usleep_range(10, 20); /* 50ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ msleep(30); /* 30ms delay needed as per HW design */
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_IRQ_STATUS, value,
value & XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS,
500, 500 * 2000);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index adccdd816ea9..118a22406a2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -70,15 +70,17 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
writel(value, ioaddr + DMA_AXI_BUS_MODE);
}
-static void dwmac1000_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+static void dwmac1000_dma_init_channel(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
- u32 value = readl(ioaddr + DMA_BUS_MODE);
int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+ u32 value;
- /*
- * Set the DMA PBL (Programmable Burst Length) mode.
+ value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan));
+
+ /* Set the DMA PBL (Programmable Burst Length) mode.
*
* Note: before stmmac core 3.50 this mode bit was 4xPBL, and
* post 3.5 mode bit acts as 8*PBL.
@@ -98,16 +100,16 @@ static void dwmac1000_dma_init(void __iomem *ioaddr,
if (dma_cfg->mixed_burst)
value |= DMA_BUS_MODE_MB;
- if (atds)
+ if (dma_cfg->atds)
value |= DMA_BUS_MODE_ATDS;
if (dma_cfg->aal)
value |= DMA_BUS_MODE_AAL;
- writel(value, ioaddr + DMA_BUS_MODE);
+ writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan));
/* Mask interrupts by writing to CSR7 */
- writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
static void dwmac1000_dma_init_rx(struct stmmac_priv *priv,
@@ -116,7 +118,7 @@ static void dwmac1000_dma_init_rx(struct stmmac_priv *priv,
dma_addr_t dma_rx_phy, u32 chan)
{
/* RX descriptor base address list must be written into DMA CSR3 */
- writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR);
+ writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RCV_BASE_ADDR(chan));
}
static void dwmac1000_dma_init_tx(struct stmmac_priv *priv,
@@ -125,7 +127,7 @@ static void dwmac1000_dma_init_tx(struct stmmac_priv *priv,
dma_addr_t dma_tx_phy, u32 chan)
{
/* TX descriptor base address list must be written into DMA CSR4 */
- writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR);
+ writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
}
static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
@@ -153,7 +155,7 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv,
void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
- u32 csr6 = readl(ioaddr + DMA_CONTROL);
+ u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel));
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable RX store and forward mode\n");
@@ -175,14 +177,14 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv,
/* Configure flow control based on rx fifo size */
csr6 = dwmac1000_configure_fc(csr6, fifosz);
- writel(csr6, ioaddr + DMA_CONTROL);
+ writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel));
}
static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv,
void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
- u32 csr6 = readl(ioaddr + DMA_CONTROL);
+ u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel));
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable TX store and forward mode\n");
@@ -209,7 +211,7 @@ static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv,
csr6 |= DMA_CONTROL_TTC_256;
}
- writel(csr6, ioaddr + DMA_CONTROL);
+ writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel));
}
static void dwmac1000_dump_dma_regs(struct stmmac_priv *priv,
@@ -271,12 +273,12 @@ static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
static void dwmac1000_rx_watchdog(struct stmmac_priv *priv,
void __iomem *ioaddr, u32 riwt, u32 queue)
{
- writel(riwt, ioaddr + DMA_RX_WATCHDOG);
+ writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue));
}
const struct stmmac_dma_ops dwmac1000_dma_ops = {
.reset = dwmac_dma_reset,
- .init = dwmac1000_dma_init,
+ .init_chan = dwmac1000_dma_init_channel,
.init_rx_chan = dwmac1000_dma_init_rx,
.init_tx_chan = dwmac1000_dma_init_tx,
.axi = dwmac1000_dma_axi,
@@ -294,3 +296,4 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = {
.get_hw_feature = dwmac1000_get_hw_feature,
.rx_watchdog = dwmac1000_rx_watchdog,
};
+EXPORT_SYMBOL_GPL(dwmac1000_dma_ops);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index b402fb54f613..82957db47c99 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -19,7 +19,7 @@
#include "dwmac_dma.h"
static void dwmac100_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+ struct stmmac_dma_cfg *dma_cfg)
{
/* Enable Application Access by writing to DMA CSR0 */
writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT),
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 31c387cc5f26..e65a65666cc1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
@@ -58,10 +59,6 @@ static void dwmac4_core_init(struct mac_device_info *hw,
if (hw->pcs)
value |= GMAC_PCS_IRQ_DEFAULT;
- /* Enable FPE interrupt */
- if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26)
- value |= GMAC_INT_FPE_EN;
-
writel(value, ioaddr + GMAC_INT_EN);
if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
@@ -475,7 +472,7 @@ static int dwmac4_write_vlan_filter(struct net_device *dev,
u8 index, u32 data)
{
void __iomem *ioaddr = (void __iomem *)dev->base_addr;
- int i, timeout = 10;
+ int ret;
u32 val;
if (index >= hw->num_vlan)
@@ -491,16 +488,15 @@ static int dwmac4_write_vlan_filter(struct net_device *dev,
writel(val, ioaddr + GMAC_VLAN_TAG);
- for (i = 0; i < timeout; i++) {
- val = readl(ioaddr + GMAC_VLAN_TAG);
- if (!(val & GMAC_VLAN_TAG_CTRL_OB))
- return 0;
- udelay(1);
+ ret = readl_poll_timeout(ioaddr + GMAC_VLAN_TAG, val,
+ !(val & GMAC_VLAN_TAG_CTRL_OB),
+ 1000, 500000);
+ if (ret) {
+ netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
+ return -EBUSY;
}
- netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
-
- return -EBUSY;
+ return 0;
}
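
readl_poll_timeout() (from <linux/iopoll.h>) takes (addr, val, break-condition, sleep-us, timeout-us) and returns -ETIMEDOUT on failure. A rough open-coded equivalent of the call above, illustration only:

unsigned long timeout = jiffies + usecs_to_jiffies(500000);

for (;;) {
	val = readl(ioaddr + GMAC_VLAN_TAG);
	if (!(val & GMAC_VLAN_TAG_CTRL_OB))
		break;				/* write accepted */
	if (time_after(jiffies, timeout))
		return -ETIMEDOUT;		/* the helper's error code */
	usleep_range(1000, 2000);		/* ~sleep-us between polls */
}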
static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
@@ -1268,6 +1264,9 @@ const struct stmmac_ops dwmac410_ops = {
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
+ .fpe_get_add_frag_size = dwmac5_fpe_get_add_frag_size,
+ .fpe_set_add_frag_size = dwmac5_fpe_set_add_frag_size,
+ .fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
@@ -1320,6 +1319,9 @@ const struct stmmac_ops dwmac510_ops = {
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
+ .fpe_get_add_frag_size = dwmac5_fpe_get_add_frag_size,
+ .fpe_set_add_frag_size = dwmac5_fpe_set_add_frag_size,
+ .fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 1c5802e0d7f4..e99401bcc1f8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -186,10 +186,12 @@ static void dwmac4_set_tx_owner(struct dma_desc *p)
static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
- p->des3 |= cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
+ u32 flags = (RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
if (!disable_rx_ic)
- p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
+ flags |= RDES3_INT_ON_COMPLETION_EN;
+
+ p->des3 |= cpu_to_le32(flags);
}
static int dwmac4_get_tx_ls(struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 84d3a8551b03..e0165358c4ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -153,7 +153,7 @@ static void dwmac410_dma_init_channel(struct stmmac_priv *priv,
}
static void dwmac4_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+ struct stmmac_dma_cfg *dma_cfg)
{
u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index e02cebc3f1b7..08add508db84 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -575,11 +575,11 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
- bool enable)
+ bool tx_enable, bool pmac_enable)
{
u32 value;
- if (enable) {
+ if (tx_enable) {
cfg->fpe_csr = EFPE;
value = readl(ioaddr + GMAC_RXQ_CTRL1);
value &= ~GMAC_RXQCTRL_FPRQ;
@@ -589,6 +589,21 @@ void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
cfg->fpe_csr = 0;
}
writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS);
+
+ value = readl(ioaddr + GMAC_INT_EN);
+
+ if (pmac_enable) {
+ if (!(value & GMAC_INT_FPE_EN)) {
+ /* Dummy read to clear any pending masked interrupts */
+ readl(ioaddr + MAC_FPE_CTRL_STS);
+
+ value |= GMAC_INT_FPE_EN;
+ }
+ } else {
+ value &= ~GMAC_INT_FPE_EN;
+ }
+
+ writel(value, ioaddr + GMAC_INT_EN);
}
int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
@@ -605,22 +620,22 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
if (value & TRSP) {
status |= FPE_EVENT_TRSP;
- netdev_info(dev, "FPE: Respond mPacket is transmitted\n");
+ netdev_dbg(dev, "FPE: Respond mPacket is transmitted\n");
}
if (value & TVER) {
status |= FPE_EVENT_TVER;
- netdev_info(dev, "FPE: Verify mPacket is transmitted\n");
+ netdev_dbg(dev, "FPE: Verify mPacket is transmitted\n");
}
if (value & RRSP) {
status |= FPE_EVENT_RRSP;
- netdev_info(dev, "FPE: Respond mPacket is received\n");
+ netdev_dbg(dev, "FPE: Respond mPacket is received\n");
}
if (value & RVER) {
status |= FPE_EVENT_RVER;
- netdev_info(dev, "FPE: Verify mPacket is received\n");
+ netdev_dbg(dev, "FPE: Verify mPacket is received\n");
}
return status;
@@ -638,3 +653,72 @@ void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
writel(value, ioaddr + MAC_FPE_CTRL_STS);
}
+
+int dwmac5_fpe_get_add_frag_size(const void __iomem *ioaddr)
+{
+ return FIELD_GET(DWMAC5_ADD_FRAG_SZ, readl(ioaddr + MTL_FPE_CTRL_STS));
+}
+
+void dwmac5_fpe_set_add_frag_size(void __iomem *ioaddr, u32 add_frag_size)
+{
+ u32 value;
+
+ value = readl(ioaddr + MTL_FPE_CTRL_STS);
+ writel(u32_replace_bits(value, add_frag_size, DWMAC5_ADD_FRAG_SZ),
+ ioaddr + MTL_FPE_CTRL_STS);
+}
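
A worked example of the <linux/bitfield.h> helpers above against the MTL_FPE_CTRL_STS layout, where ADD_FRAG_SZ occupies bits 1:0:

u32 reg = 0xff05;					/* sample CSR value */

u32 frag = FIELD_GET(DWMAC5_ADD_FRAG_SZ, reg);		/* frag == 0x1 */

reg = u32_replace_bits(reg, 0x2, DWMAC5_ADD_FRAG_SZ);	/* reg == 0xff06 */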
+
+#define ALG_ERR_MSG "TX algorithm SP is not suitable for one-to-many mapping"
+#define WEIGHT_ERR_MSG "TXQ weight %u differs across other TXQs in TC: [%u]"
+
+int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
+ struct netlink_ext_ack *extack, u32 pclass)
+{
+ u32 val, offset, count, queue_weight, preemptible_txqs = 0;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ u32 num_tc = ndev->num_tc;
+
+ if (!pclass)
+ goto update_mapping;
+
+ /* DWMAC CORE4+ cannot program the TC:TXQ mapping to hardware.
+ *
+ * Synopsys Databook:
+ * "The number of Tx DMA channels is equal to the number of Tx queues,
+ * and is direct one-to-one mapping."
+ */
+ for (u32 tc = 0; tc < num_tc; tc++) {
+ count = ndev->tc_to_txq[tc].count;
+ offset = ndev->tc_to_txq[tc].offset;
+
+ if (pclass & BIT(tc))
+ preemptible_txqs |= GENMASK(offset + count - 1, offset);
+
+ /* This is 1:1 mapping, go to next TC */
+ if (count == 1)
+ continue;
+
+ if (priv->plat->tx_sched_algorithm == MTL_TX_ALGORITHM_SP) {
+ NL_SET_ERR_MSG_MOD(extack, ALG_ERR_MSG);
+ return -EINVAL;
+ }
+
+ queue_weight = priv->plat->tx_queues_cfg[offset].weight;
+
+ for (u32 i = 1; i < count; i++) {
+ if (priv->plat->tx_queues_cfg[offset + i].weight !=
+ queue_weight) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, WEIGHT_ERR_MSG,
+ queue_weight, tc);
+ return -EINVAL;
+ }
+ }
+ }
+
+update_mapping:
+ val = readl(priv->ioaddr + MTL_FPE_CTRL_STS);
+ writel(u32_replace_bits(val, preemptible_txqs, DWMAC5_PREEMPTION_CLASS),
+ priv->ioaddr + MTL_FPE_CTRL_STS);
+
+ return 0;
+}
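
A worked example of the TXQ-mask computation above, illustration only: two TCs of two queues each (tc0 -> txq 0-1, tc1 -> txq 2-3) with pclass marking tc1 preemptible:

u32 offset = 2, count = 2;	/* ndev->tc_to_txq[1] */
u32 pclass = BIT(1);		/* tc1 is preemptible */
u32 preemptible_txqs = 0;

if (pclass & BIT(1))
	preemptible_txqs |= GENMASK(offset + count - 1, offset);

/* preemptible_txqs == 0xc: TXQs 2 and 3 are steered to the pMAC */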
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index bf33a51d229e..6c6eb6790e83 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -39,6 +39,12 @@
#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10))
#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10))
+#define MTL_FPE_CTRL_STS 0x00000c90
+/* Preemption Classification */
+#define DWMAC5_PREEMPTION_CLASS GENMASK(15, 8)
+/* Additional Fragment Size of preempted frames */
+#define DWMAC5_ADD_FRAG_SZ GENMASK(1, 0)
+
#define MTL_RXP_CONTROL_STATUS 0x00000ca0
#define RXPI BIT(31)
#define NPE GENMASK(23, 16)
@@ -104,10 +110,14 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
u32 sub_second_inc, u32 systime_flags);
void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
- bool enable);
+ bool tx_enable, bool pmac_enable);
void dwmac5_fpe_send_mpacket(void __iomem *ioaddr,
struct stmmac_fpe_cfg *cfg,
enum stmmac_mpacket_type type);
int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev);
+int dwmac5_fpe_get_add_frag_size(const void __iomem *ioaddr);
+void dwmac5_fpe_set_add_frag_size(void __iomem *ioaddr, u32 add_frag_size);
+int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
+ struct netlink_ext_ack *extack, u32 pclass);
#endif /* __DWMAC5_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 72672391675f..5d9c18f5bbf5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -22,6 +22,31 @@
#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
+/* Following DMA defines are channels oriented */
+#define DMA_CHAN_BASE_OFFSET 0x100
+
+static inline u32 dma_chan_base_addr(u32 base, u32 chan)
+{
+ return base + chan * DMA_CHAN_BASE_OFFSET;
+}
+
+#define DMA_CHAN_BUS_MODE(chan) dma_chan_base_addr(DMA_BUS_MODE, chan)
+#define DMA_CHAN_XMT_POLL_DEMAND(chan) \
+ dma_chan_base_addr(DMA_XMT_POLL_DEMAND, chan)
+#define DMA_CHAN_RCV_POLL_DEMAND(chan) \
+ dma_chan_base_addr(DMA_RCV_POLL_DEMAND, chan)
+#define DMA_CHAN_RCV_BASE_ADDR(chan) \
+ dma_chan_base_addr(DMA_RCV_BASE_ADDR, chan)
+#define DMA_CHAN_TX_BASE_ADDR(chan) \
+ dma_chan_base_addr(DMA_TX_BASE_ADDR, chan)
+#define DMA_CHAN_STATUS(chan) dma_chan_base_addr(DMA_STATUS, chan)
+#define DMA_CHAN_CONTROL(chan) dma_chan_base_addr(DMA_CONTROL, chan)
+#define DMA_CHAN_INTR_ENA(chan) dma_chan_base_addr(DMA_INTR_ENA, chan)
+#define DMA_CHAN_MISSED_FRAME_CTR(chan) \
+ dma_chan_base_addr(DMA_MISSED_FRAME_CTR, chan)
+#define DMA_CHAN_RX_WATCHDOG(chan) \
+ dma_chan_base_addr(DMA_RX_WATCHDOG, chan)
+
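
With the usual dwmac_dma.h offsets (DMA_BUS_MODE 0x1000, DMA_STATUS 0x1014) and the 0x100 stride above, the accessors resolve as follows; channel 0 stays register-compatible with the legacy single-channel DMA_* map:

u32 r0 = DMA_CHAN_BUS_MODE(0);	/* 0x1000: same as the old DMA_BUS_MODE */
u32 r1 = DMA_CHAN_BUS_MODE(1);	/* 0x1000 + 1 * 0x100 == 0x1100 */
u32 r7 = DMA_CHAN_STATUS(7);	/* 0x1014 + 7 * 0x100 == 0x1714 */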
/* SW Reset */
#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
@@ -152,7 +177,7 @@
#define NUM_DWMAC1000_DMA_REGS 23
#define NUM_DWMAC4_DMA_REGS 27
-void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan);
void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 85e18f9a22f9..4846bf49c576 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -28,65 +28,65 @@ int dwmac_dma_reset(void __iomem *ioaddr)
}
/* CSR1 enables the transmit DMA to check for new descriptor */
-void dwmac_enable_dma_transmission(void __iomem *ioaddr)
+void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan)
{
- writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
+ writel(1, ioaddr + DMA_CHAN_XMT_POLL_DEMAND(chan));
}
void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx)
{
- u32 value = readl(ioaddr + DMA_INTR_ENA);
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
if (rx)
value |= DMA_INTR_DEFAULT_RX;
if (tx)
value |= DMA_INTR_DEFAULT_TX;
- writel(value, ioaddr + DMA_INTR_ENA);
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx)
{
- u32 value = readl(ioaddr + DMA_INTR_ENA);
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
if (rx)
value &= ~DMA_INTR_DEFAULT_RX;
if (tx)
value &= ~DMA_INTR_DEFAULT_TX;
- writel(value, ioaddr + DMA_INTR_ENA);
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
void dwmac_dma_start_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan)
{
- u32 value = readl(ioaddr + DMA_CONTROL);
+ u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value |= DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
void dwmac_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CONTROL);
+ u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value &= ~DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
void dwmac_dma_start_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan)
{
- u32 value = readl(ioaddr + DMA_CONTROL);
+ u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value |= DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
void dwmac_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CONTROL);
+ u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value &= ~DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
#ifdef DWMAC_DMA_DEBUG
@@ -165,7 +165,7 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
/* read the status register (CSR5) */
- u32 intr_status = readl(ioaddr + DMA_STATUS);
+ u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
#ifdef DWMAC_DMA_DEBUG
/* Enable it to monitor DMA rx/tx status in case of critical problems */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index f196cd99d510..f519d43738b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -846,42 +846,41 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
-#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error"
-#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error"
-
+static const char dpp_rx_err[] = "Read Rx Descriptor Parity checker Error";
+static const char dpp_tx_err[] = "Read Tx Descriptor Parity checker Error";
static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
- { true, "TDPES0", DPP_TX_ERR },
- { true, "TDPES1", DPP_TX_ERR },
- { true, "TDPES2", DPP_TX_ERR },
- { true, "TDPES3", DPP_TX_ERR },
- { true, "TDPES4", DPP_TX_ERR },
- { true, "TDPES5", DPP_TX_ERR },
- { true, "TDPES6", DPP_TX_ERR },
- { true, "TDPES7", DPP_TX_ERR },
- { true, "TDPES8", DPP_TX_ERR },
- { true, "TDPES9", DPP_TX_ERR },
- { true, "TDPES10", DPP_TX_ERR },
- { true, "TDPES11", DPP_TX_ERR },
- { true, "TDPES12", DPP_TX_ERR },
- { true, "TDPES13", DPP_TX_ERR },
- { true, "TDPES14", DPP_TX_ERR },
- { true, "TDPES15", DPP_TX_ERR },
- { true, "RDPES0", DPP_RX_ERR },
- { true, "RDPES1", DPP_RX_ERR },
- { true, "RDPES2", DPP_RX_ERR },
- { true, "RDPES3", DPP_RX_ERR },
- { true, "RDPES4", DPP_RX_ERR },
- { true, "RDPES5", DPP_RX_ERR },
- { true, "RDPES6", DPP_RX_ERR },
- { true, "RDPES7", DPP_RX_ERR },
- { true, "RDPES8", DPP_RX_ERR },
- { true, "RDPES9", DPP_RX_ERR },
- { true, "RDPES10", DPP_RX_ERR },
- { true, "RDPES11", DPP_RX_ERR },
- { true, "RDPES12", DPP_RX_ERR },
- { true, "RDPES13", DPP_RX_ERR },
- { true, "RDPES14", DPP_RX_ERR },
- { true, "RDPES15", DPP_RX_ERR },
+ { true, "TDPES0", dpp_tx_err },
+ { true, "TDPES1", dpp_tx_err },
+ { true, "TDPES2", dpp_tx_err },
+ { true, "TDPES3", dpp_tx_err },
+ { true, "TDPES4", dpp_tx_err },
+ { true, "TDPES5", dpp_tx_err },
+ { true, "TDPES6", dpp_tx_err },
+ { true, "TDPES7", dpp_tx_err },
+ { true, "TDPES8", dpp_tx_err },
+ { true, "TDPES9", dpp_tx_err },
+ { true, "TDPES10", dpp_tx_err },
+ { true, "TDPES11", dpp_tx_err },
+ { true, "TDPES12", dpp_tx_err },
+ { true, "TDPES13", dpp_tx_err },
+ { true, "TDPES14", dpp_tx_err },
+ { true, "TDPES15", dpp_tx_err },
+ { true, "RDPES0", dpp_rx_err },
+ { true, "RDPES1", dpp_rx_err },
+ { true, "RDPES2", dpp_rx_err },
+ { true, "RDPES3", dpp_rx_err },
+ { true, "RDPES4", dpp_rx_err },
+ { true, "RDPES5", dpp_rx_err },
+ { true, "RDPES6", dpp_rx_err },
+ { true, "RDPES7", dpp_rx_err },
+ { true, "RDPES8", dpp_rx_err },
+ { true, "RDPES9", dpp_rx_err },
+ { true, "RDPES10", dpp_rx_err },
+ { true, "RDPES11", dpp_rx_err },
+ { true, "RDPES12", dpp_rx_err },
+ { true, "RDPES13", dpp_rx_err },
+ { true, "RDPES14", dpp_rx_err },
+ { true, "RDPES15", dpp_rx_err },
};
static void dwxgmac3_handle_dma_err(struct net_device *ndev,
@@ -1505,13 +1504,14 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
-static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
- u32 num_txq,
- u32 num_rxq, bool enable)
+static void dwxgmac3_fpe_configure(void __iomem *ioaddr,
+ struct stmmac_fpe_cfg *cfg,
+ u32 num_txq, u32 num_rxq,
+ bool tx_enable, bool pmac_enable)
{
u32 value;
- if (!enable) {
+ if (!tx_enable) {
value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
value &= ~XGMAC_EFPE;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index fc82862a612c..389aad7b5c1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -56,10 +56,12 @@ static void dwxgmac2_set_tx_owner(struct dma_desc *p)
static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
- p->des3 |= cpu_to_le32(XGMAC_RDES3_OWN);
+ u32 flags = XGMAC_RDES3_OWN;
if (!disable_rx_ic)
- p->des3 |= cpu_to_le32(XGMAC_RDES3_IOC);
+ flags |= XGMAC_RDES3_IOC;
+
+ p->des3 |= cpu_to_le32(flags);
}
static int dwxgmac2_get_tx_ls(struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index dd2ab6185c40..7840bc403788 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -20,7 +20,7 @@ static int dwxgmac2_dma_reset(void __iomem *ioaddr)
}
static void dwxgmac2_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+ struct stmmac_dma_cfg *dma_cfg)
{
u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 29367105df54..88cce28b2f98 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -171,7 +171,7 @@ static const struct stmmac_hwif_entry {
.mac = &dwmac4_ops,
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
- .tc = &dwmac510_tc_ops,
+ .tc = &dwmac4_tc_ops,
.mmc = &dwmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwmac4_setup,
@@ -252,7 +252,7 @@ static const struct stmmac_hwif_entry {
.mac = &dwxgmac210_ops,
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
- .tc = &dwmac510_tc_ops,
+ .tc = &dwxgmac_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxgmac2_setup,
@@ -273,7 +273,7 @@ static const struct stmmac_hwif_entry {
.mac = &dwxlgmac2_ops,
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
- .tc = &dwmac510_tc_ops,
+ .tc = &dwxgmac_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxlgmac2_setup,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index e53c32362774..d5a9f01ecac5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -7,6 +7,7 @@
#include <linux/netdevice.h>
#include <linux/stmmac.h>
+#include <net/pkt_cls.h>
#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \
({ \
@@ -28,6 +29,8 @@
struct stmmac_extra_stats;
struct stmmac_priv;
struct stmmac_safety_stats;
+struct stmmac_fpe_cfg;
+enum stmmac_mpacket_type;
struct dma_desc;
struct dma_extended_desc;
struct dma_edesc;
@@ -175,8 +178,7 @@ struct dma_features;
struct stmmac_dma_ops {
/* DMA core initialization */
int (*reset)(void __iomem *ioaddr);
- void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
- int atds);
+ void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg);
void (*init_chan)(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg, u32 chan);
void (*init_rx_chan)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -198,7 +200,7 @@ struct stmmac_dma_ops {
/* To track extra statistic (if supported) */
void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x,
void __iomem *ioaddr);
- void (*enable_dma_transmission) (void __iomem *ioaddr);
+ void (*enable_dma_transmission)(void __iomem *ioaddr, u32 chan);
void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
void (*disable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -420,11 +422,16 @@ struct stmmac_ops {
void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr);
void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
- bool enable);
+ bool tx_enable, bool pmac_enable);
void (*fpe_send_mpacket)(void __iomem *ioaddr,
struct stmmac_fpe_cfg *cfg,
enum stmmac_mpacket_type type);
int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev);
+ int (*fpe_get_add_frag_size)(const void __iomem *ioaddr);
+ void (*fpe_set_add_frag_size)(void __iomem *ioaddr, u32 add_frag_size);
+ int (*fpe_map_preemption_class)(struct net_device *ndev,
+ struct netlink_ext_ack *extack,
+ u32 pclass);
};
#define stmmac_core_init(__priv, __args...) \
@@ -529,6 +536,12 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, fpe_send_mpacket, __args)
#define stmmac_fpe_irq_status(__priv, __args...) \
stmmac_do_callback(__priv, mac, fpe_irq_status, __args)
+#define stmmac_fpe_get_add_frag_size(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, fpe_get_add_frag_size, __args)
+#define stmmac_fpe_set_add_frag_size(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, fpe_set_add_frag_size, __args)
+#define stmmac_fpe_map_preemption_class(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, fpe_map_preemption_class, __args)
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
@@ -616,6 +629,8 @@ struct stmmac_tc_ops {
struct tc_etf_qopt_offload *qopt);
int (*query_caps)(struct stmmac_priv *priv,
struct tc_query_caps_base *base);
+ int (*setup_mqprio)(struct stmmac_priv *priv,
+ struct tc_mqprio_qopt_offload *qopt);
};
#define stmmac_tc_init(__priv, __args...) \
@@ -632,6 +647,8 @@ struct stmmac_tc_ops {
stmmac_do_callback(__priv, tc, setup_etf, __args)
#define stmmac_tc_query_caps(__priv, __args...) \
stmmac_do_callback(__priv, tc, query_caps, __args)
+#define stmmac_tc_setup_mqprio(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_mqprio, __args)
struct stmmac_counters;
@@ -675,7 +692,9 @@ extern const struct stmmac_dma_ops dwmac4_dma_ops;
extern const struct stmmac_ops dwmac410_ops;
extern const struct stmmac_dma_ops dwmac410_dma_ops;
extern const struct stmmac_ops dwmac510_ops;
+extern const struct stmmac_tc_ops dwmac4_tc_ops;
extern const struct stmmac_tc_ops dwmac510_tc_ops;
+extern const struct stmmac_tc_ops dwxgmac_tc_ops;
extern const struct stmmac_ops dwxgmac210_ops;
extern const struct stmmac_ops dwxlgmac2_ops;
extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
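The stmmac_do_callback() plumbing above dispatches through per-IP ops tables, so the new dwmac4_tc_ops/dwxgmac_tc_ops entries just change which table a given core revision binds to. A simplified sketch of that dispatch pattern (not the driver's exact macro expansion; the real macros return -EINVAL for a missing slot in much the same way):

    #include <stdio.h>
    #include <errno.h>

    /* Simplified model of an stmmac-style ops table: a NULL slot
     * means "not implemented for this MAC". */
    struct tc_ops {
            int (*setup_mqprio)(int num_tc);
    };

    static int dwmac510_setup_mqprio(int num_tc)
    {
            printf("mapping %d traffic classes\n", num_tc);
            return 0;
    }

    static const struct tc_ops dwmac510 = { .setup_mqprio = dwmac510_setup_mqprio };
    static const struct tc_ops dwmac4   = { 0 }; /* unimplemented slot */

    static int do_setup_mqprio(const struct tc_ops *ops, int num_tc)
    {
            return ops->setup_mqprio ? ops->setup_mqprio(num_tc) : -EINVAL;
    }

    int main(void)
    {
            do_setup_mqprio(&dwmac510, 4); /* dispatched */
            if (do_setup_mqprio(&dwmac4, 4) == -EINVAL)
                    puts("dwmac4 model: not implemented");
            return 0;
    }

In the actual patch the non-dwmac510 tables point at tc_setup_mqprio_unimplemented() instead of leaving the slot empty, so userspace gets a descriptive extack message rather than a bare error code.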
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b23b920eedb1..ea135203ff2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -146,6 +146,32 @@ struct stmmac_channel {
u32 index;
};
+/* FPE link-partner handshaking mPacket type */
+enum stmmac_mpacket_type {
+ MPACKET_VERIFY = 0,
+ MPACKET_RESPONSE = 1,
+};
+
+#define STMMAC_FPE_MM_MAX_VERIFY_RETRIES 3
+#define STMMAC_FPE_MM_MAX_VERIFY_TIME_MS 128
+
+struct stmmac_fpe_cfg {
+ /* Serialize access to MAC Merge state between ethtool requests
+ * and link state updates.
+ */
+ spinlock_t lock;
+
+ u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
+
+ enum ethtool_mm_verify_status status;
+ struct timer_list verify_timer;
+ bool verify_enabled;
+ int verify_retries;
+ bool pmac_enabled;
+ u32 verify_time;
+ bool tx_enabled;
+};
+
struct stmmac_tc_entry {
bool in_use;
bool in_hw;
@@ -339,11 +365,8 @@ struct stmmac_priv {
struct workqueue_struct *wq;
struct work_struct service_task;
- /* Workqueue for handling FPE hand-shaking */
- unsigned long fpe_task_state;
- struct workqueue_struct *fpe_wq;
- struct work_struct fpe_task;
- char wq_name[IFNAMSIZ + 4];
+ /* Frame Preemption feature (FPE) */
+ struct stmmac_fpe_cfg fpe_cfg;
/* TC Handling */
unsigned int tc_entries_max;
@@ -397,7 +420,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv);
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
-void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable);
+void stmmac_fpe_apply(struct stmmac_priv *priv);
static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
{
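The new fpe_cfg fields implement the ethtool MAC Merge verification state machine: up to STMMAC_FPE_MM_MAX_VERIFY_RETRIES Verify mPackets are sent, the state moves to SUCCEEDED when the partner answers with a Response mPacket, and to FAILED once the retries are exhausted. A self-contained userspace model of the timer-driven part of that progression (illustrative only; the Response handling lives in the interrupt path shown later in stmmac_main.c):

    #include <stdbool.h>
    #include <stdio.h>

    enum mm_status { MM_INITIAL, MM_VERIFYING, MM_SUCCEEDED, MM_FAILED };

    #define MAX_VERIFY_RETRIES 3 /* STMMAC_FPE_MM_MAX_VERIFY_RETRIES */

    struct mm_state {
            enum mm_status status;
            int retries;
    };

    /* One verify_timer expiry: resend Verify while retries remain,
     * otherwise give up. A Response mPacket from the link partner
     * would move the state straight to MM_SUCCEEDED instead. */
    static bool mm_timer_tick(struct mm_state *s)
    {
            if (s->retries-- > 0) {
                    s->status = MM_VERIFYING; /* send Verify mPacket */
                    return true;              /* re-arm the timer */
            }
            s->status = MM_FAILED;
            return false;
    }

    int main(void)
    {
            struct mm_state s = { MM_INITIAL, MAX_VERIFY_RETRIES };

            while (mm_timer_tick(&s))
                    puts("verify sent, waiting for response");
            puts(s.status == MM_FAILED ? "verification failed" : "ok");
            return 0;
    }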
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 7008219fd88d..2a37592a6281 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -19,6 +19,7 @@
#include "stmmac.h"
#include "dwmac_dma.h"
#include "dwxgmac2.h"
+#include "dwmac5.h"
#define REG_SPACE_SIZE 0x1060
#define GMAC4_REG_SPACE_SIZE 0x116C
@@ -438,13 +439,6 @@ static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
}
-static int stmmac_check_if_running(struct net_device *dev)
-{
- if (!netif_running(dev))
- return -EBUSY;
- return 0;
-}
-
static int stmmac_ethtool_get_regs_len(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -1207,13 +1201,13 @@ static int stmmac_get_ts_info(struct net_device *dev,
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (priv->ptp_clock)
info->phc_index = ptp_clock_index(priv->ptp_clock);
+ else
+ info->phc_index = 0;
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
@@ -1270,10 +1264,101 @@ static int stmmac_set_tunable(struct net_device *dev,
return ret;
}
+static int stmmac_get_mm(struct net_device *ndev,
+ struct ethtool_mm_state *state)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned long flags;
+ u32 frag_size;
+
+ if (!priv->dma_cap.fpesel)
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&priv->fpe_cfg.lock, flags);
+
+ state->max_verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
+ state->verify_enabled = priv->fpe_cfg.verify_enabled;
+ state->pmac_enabled = priv->fpe_cfg.pmac_enabled;
+ state->verify_time = priv->fpe_cfg.verify_time;
+ state->tx_enabled = priv->fpe_cfg.tx_enabled;
+ state->verify_status = priv->fpe_cfg.status;
+ state->rx_min_frag_size = ETH_ZLEN;
+
+ /* FPE is active only when tx is enabled and verification has
+ * either succeeded or been disabled (i.e. forced on).
+ */
+ if (state->tx_enabled &&
+ (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED ||
+ state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED))
+ state->tx_active = true;
+ else
+ state->tx_active = false;
+
+ frag_size = stmmac_fpe_get_add_frag_size(priv, priv->ioaddr);
+ state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(frag_size);
+
+ spin_unlock_irqrestore(&priv->fpe_cfg.lock, flags);
+
+ return 0;
+}
+
+static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+ unsigned long flags;
+ u32 frag_size;
+ int err;
+
+ err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
+ &frag_size, extack);
+ if (err)
+ return err;
+
+ /* Wait for the verification that's currently in progress to finish */
+ timer_shutdown_sync(&fpe_cfg->verify_timer);
+
+ spin_lock_irqsave(&fpe_cfg->lock, flags);
+
+ fpe_cfg->verify_enabled = cfg->verify_enabled;
+ fpe_cfg->pmac_enabled = cfg->pmac_enabled;
+ fpe_cfg->verify_time = cfg->verify_time;
+ fpe_cfg->tx_enabled = cfg->tx_enabled;
+
+ if (!cfg->verify_enabled)
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+
+ stmmac_fpe_set_add_frag_size(priv, priv->ioaddr, frag_size);
+ stmmac_fpe_apply(priv);
+
+ spin_unlock_irqrestore(&fpe_cfg->lock, flags);
+
+ return 0;
+}
+
+static void stmmac_get_mm_stats(struct net_device *ndev,
+ struct ethtool_mm_stats *s)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_counters *mmc = &priv->mmc;
+
+ if (!priv->dma_cap.rmon)
+ return;
+
+ stmmac_mmc_read(priv, priv->mmcaddr, mmc);
+
+ s->MACMergeFrameAssErrorCount = mmc->mmc_rx_packet_assembly_err_cntr;
+ s->MACMergeFrameAssOkCount = mmc->mmc_rx_packet_assembly_ok_cntr;
+ s->MACMergeFrameSmdErrorCount = mmc->mmc_rx_packet_smd_err_cntr;
+ s->MACMergeFragCountRx = mmc->mmc_rx_fpe_fragment_cntr;
+ s->MACMergeFragCountTx = mmc->mmc_tx_fpe_fragment_cntr;
+ s->MACMergeHoldCount = mmc->mmc_tx_hold_req_cntr;
+}
+
static const struct ethtool_ops stmmac_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
- .begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
.get_msglevel = stmmac_ethtool_getmsglevel,
.set_msglevel = stmmac_ethtool_setmsglevel,
@@ -1309,6 +1394,9 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.set_tunable = stmmac_set_tunable,
.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
.set_link_ksettings = stmmac_ethtool_set_link_ksettings,
+ .get_mm = stmmac_get_mm,
+ .set_mm = stmmac_set_mm,
+ .get_mm_stats = stmmac_get_mm_stats,
};
void stmmac_set_ethtool_ops(struct net_device *netdev)
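With the get_mm/set_mm/get_mm_stats hooks in place, frame preemption on stmmac can be driven entirely from userspace. Assuming an ethtool build new enough to carry the MAC Merge commands (eth0 is a placeholder interface name):

    ethtool --show-mm eth0
    ethtool --set-mm eth0 pmac-enabled on verify-enabled on tx-enabled on

The set command kicks off the verification handshake modelled above; --show-mm then reports verify-status and whether preemption is actually tx-active, per the tx_active derivation in stmmac_get_mm().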
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f3a1b179aaea..d3895d7eecfc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -968,18 +968,31 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+ unsigned long flags;
- if (is_up && *hs_enable) {
- stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
- MPACKET_VERIFY);
+ timer_shutdown_sync(&fpe_cfg->verify_timer);
+
+ spin_lock_irqsave(&fpe_cfg->lock, flags);
+
+ if (is_up && fpe_cfg->pmac_enabled) {
+ /* The VERIFY process requires the pMAC to be enabled when the link comes up */
+ stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ false, true);
+
+ /* A new link may mean a new partner, so restart the verification process */
+ stmmac_fpe_apply(priv);
} else {
- *lo_state = FPE_STATE_OFF;
- *lp_state = FPE_STATE_OFF;
+ /* No link => turn off EFPE */
+ stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ false, false);
}
+
+ spin_unlock_irqrestore(&fpe_cfg->lock, flags);
}
static void stmmac_mac_link_down(struct phylink_config *config,
@@ -2367,9 +2380,11 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
- /* Adjust for real per queue fifo size */
- rxfifosz /= rx_channels_count;
- txfifosz /= tx_channels_count;
+ /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
+ if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
+ rxfifosz /= rx_channels_count;
+ txfifosz /= tx_channels_count;
+ }
if (priv->plat->force_thresh_dma_mode) {
txmode = tc;
@@ -2553,7 +2568,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
true, priv->mode, true, true,
xdp_desc.len);
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
xsk_tx_metadata_to_compl(meta,
&tx_q->tx_skbuff_dma[entry].xsk_meta);
@@ -3003,7 +3018,6 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
struct stmmac_rx_queue *rx_q;
struct stmmac_tx_queue *tx_q;
u32 chan = 0;
- int atds = 0;
int ret = 0;
if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
@@ -3012,7 +3026,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
}
if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
- atds = 1;
+ priv->plat->dma_cfg->atds = 1;
ret = stmmac_reset(priv, priv->ioaddr);
if (ret) {
@@ -3021,7 +3035,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
}
/* DMA Configuration */
- stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
if (priv->plat->axi)
stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
@@ -3357,27 +3371,6 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
}
}
-static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
-{
- char *name;
-
- clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
- clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
-
- name = priv->wq_name;
- sprintf(name, "%s-fpe", priv->dev->name);
-
- priv->fpe_wq = create_singlethread_workqueue(name);
- if (!priv->fpe_wq) {
- netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
-
- return -ENOMEM;
- }
- netdev_info(priv->dev, "FPE workqueue start");
-
- return 0;
-}
-
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
@@ -3532,13 +3525,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
stmmac_set_hw_vlan_mode(priv, priv->hw);
- if (priv->dma_cap.fpesel) {
- stmmac_fpe_start_wq(priv);
-
- if (priv->plat->fpe_cfg->enable)
- stmmac_fpe_handshake(priv, true);
- }
-
return 0;
}
@@ -4035,18 +4021,6 @@ static int stmmac_open(struct net_device *dev)
return ret;
}
-static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
-{
- set_bit(__FPE_REMOVING, &priv->fpe_task_state);
-
- if (priv->fpe_wq) {
- destroy_workqueue(priv->fpe_wq);
- priv->fpe_wq = NULL;
- }
-
- netdev_info(priv->dev, "FPE workqueue stop");
-}
-
/**
* stmmac_release - close entry point of the driver
* @dev : device pointer.
@@ -4094,10 +4068,10 @@ static int stmmac_release(struct net_device *dev)
stmmac_release_ptp(priv);
- pm_runtime_put(priv->device);
-
if (priv->dma_cap.fpesel)
- stmmac_fpe_stop_wq(priv);
+ timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
+
+ pm_runtime_put(priv->device);
return 0;
}
@@ -4754,7 +4728,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -4981,7 +4955,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
u64_stats_update_end(&txq_stats->q_syncp);
}
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;
@@ -5981,45 +5955,31 @@ static int stmmac_set_features(struct net_device *netdev,
static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
{
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
- if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
- return;
+ /* This is interrupt context, just spin_lock() */
+ spin_lock(&fpe_cfg->lock);
- /* If LP has sent verify mPacket, LP is FPE capable */
- if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
- if (*lp_state < FPE_STATE_CAPABLE)
- *lp_state = FPE_STATE_CAPABLE;
+ if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN)
+ goto unlock_out;
- /* If user has requested FPE enable, quickly response */
- if (*hs_enable)
- stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- fpe_cfg,
- MPACKET_RESPONSE);
- }
-
- /* If Local has sent verify mPacket, Local is FPE capable */
- if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
- if (*lo_state < FPE_STATE_CAPABLE)
- *lo_state = FPE_STATE_CAPABLE;
- }
+ /* LP has sent verify mPacket */
+ if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
+ MPACKET_RESPONSE);
- /* If LP has sent response mPacket, LP is entering FPE ON */
- if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
- *lp_state = FPE_STATE_ENTERING_ON;
+ /* Local has sent verify mPacket */
+ if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER &&
+ fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED)
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
- /* If Local has sent response mPacket, Local is entering FPE ON */
- if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
- *lo_state = FPE_STATE_ENTERING_ON;
+ /* LP has sent response mPacket */
+ if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP &&
+ fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING)
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
- if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
- !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
- priv->fpe_wq) {
- queue_work(priv->fpe_wq, &priv->fpe_task);
- }
+unlock_out:
+ spin_unlock(&fpe_cfg->lock);
}
static void stmmac_common_interrupt(struct stmmac_priv *priv)
@@ -6256,6 +6216,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
switch (type) {
case TC_QUERY_CAPS:
return stmmac_tc_query_caps(priv, priv, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return stmmac_tc_setup_mqprio(priv, priv, type_data);
case TC_SETUP_BLOCK:
return flow_block_cb_setup_simple(type_data,
&stmmac_block_cb_list,
@@ -7375,68 +7337,87 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
return ret;
}
-#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
-static void stmmac_fpe_lp_task(struct work_struct *work)
+/**
+ * stmmac_fpe_verify_timer - Timer for MAC Merge verification
+ * @t: timer_list struct containing private info
+ *
+ * Verify the MAC Merge capability in the local TX direction by
+ * transmitting up to 3 Verify mPackets and waiting for the link
+ * partner to answer with a Response mPacket; fail if it never does.
+ */
+static void stmmac_fpe_verify_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
- fpe_task);
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
- bool *enable = &fpe_cfg->enable;
- int retries = 20;
-
- while (retries-- > 0) {
- /* Bail out immediately if FPE handshake is OFF */
- if (*lo_state == FPE_STATE_OFF || !*hs_enable)
- break;
-
- if (*lo_state == FPE_STATE_ENTERING_ON &&
- *lp_state == FPE_STATE_ENTERING_ON) {
- stmmac_fpe_configure(priv, priv->ioaddr,
- fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use,
- *enable);
+ struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer);
+ struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv,
+ fpe_cfg);
+ unsigned long flags;
+ bool rearm = false;
- netdev_info(priv->dev, "configured FPE\n");
+ spin_lock_irqsave(&fpe_cfg->lock, flags);
- *lo_state = FPE_STATE_ON;
- *lp_state = FPE_STATE_ON;
- netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
- break;
- }
-
- if ((*lo_state == FPE_STATE_CAPABLE ||
- *lo_state == FPE_STATE_ENTERING_ON) &&
- *lp_state != FPE_STATE_ON) {
- netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
- *lo_state, *lp_state);
+ switch (fpe_cfg->status) {
+ case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
+ case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
+ if (fpe_cfg->verify_retries != 0) {
stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- fpe_cfg,
- MPACKET_VERIFY);
+ fpe_cfg, MPACKET_VERIFY);
+ rearm = true;
+ } else {
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
}
- /* Sleep then retry */
- msleep(500);
+
+ fpe_cfg->verify_retries--;
+ break;
+
+ case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
+ stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ true, true);
+ break;
+
+ default:
+ break;
+ }
+
+ if (rearm) {
+ mod_timer(&fpe_cfg->verify_timer,
+ jiffies + msecs_to_jiffies(fpe_cfg->verify_time));
}
- clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
+ spin_unlock_irqrestore(&fpe_cfg->lock, flags);
}
-void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
+static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg)
{
- if (priv->plat->fpe_cfg->hs_enable != enable) {
- if (enable) {
- stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- priv->plat->fpe_cfg,
- MPACKET_VERIFY);
- } else {
- priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
- priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
- }
+ if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled &&
+ fpe_cfg->verify_enabled &&
+ fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED &&
+ fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) {
+ timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0);
+ mod_timer(&fpe_cfg->verify_timer, jiffies);
+ }
+}
- priv->plat->fpe_cfg->hs_enable = enable;
+void stmmac_fpe_apply(struct stmmac_priv *priv)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+
+ /* If verification is disabled, configure FPE right away.
+ * Otherwise let the timer code do it.
+ */
+ if (!fpe_cfg->verify_enabled) {
+ stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ fpe_cfg->tx_enabled,
+ fpe_cfg->pmac_enabled);
+ } else {
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL;
+ fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
+
+ if (netif_running(priv->dev))
+ stmmac_fpe_verify_timer_arm(fpe_cfg);
}
}
@@ -7554,9 +7535,6 @@ int stmmac_dvr_probe(struct device *device,
INIT_WORK(&priv->service_task, stmmac_service_task);
- /* Initialize Link Partner FPE workqueue */
- INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
-
/* Override with kernel parameters if supplied XXX CRS XXX
* this needs to have multiple instances
*/
@@ -7721,6 +7699,12 @@ int stmmac_dvr_probe(struct device *device,
mutex_init(&priv->lock);
+ priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
+ priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
+ priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+ timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0);
+ spin_lock_init(&priv->fpe_cfg.lock);
+
/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
* changed at run-time and it is fixed. Viceversa the driver'll try to
@@ -7894,16 +7878,8 @@ int stmmac_suspend(struct device *dev)
}
rtnl_unlock();
- if (priv->dma_cap.fpesel) {
- /* Disable FPE */
- stmmac_fpe_configure(priv, priv->ioaddr,
- priv->plat->fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use, false);
-
- stmmac_fpe_handshake(priv, false);
- stmmac_fpe_stop_wq(priv);
- }
+ if (priv->dma_cap.fpesel)
+ timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
priv->speed = SPEED_UNKNOWN;
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 996f2bcd07a2..75ad2da1a37f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -282,16 +282,6 @@ static int tc_init(struct stmmac_priv *priv)
if (ret)
return -ENOMEM;
- if (!priv->plat->fpe_cfg) {
- priv->plat->fpe_cfg = devm_kzalloc(priv->device,
- sizeof(*priv->plat->fpe_cfg),
- GFP_KERNEL);
- if (!priv->plat->fpe_cfg)
- return -ENOMEM;
- } else {
- memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg));
- }
-
/* Fail silently as we can still use remaining features, e.g. CBS */
if (!dma_cap->frpsel)
return 0;
@@ -396,6 +386,7 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
return ret;
priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ return 0;
}
/* Final adjustments for HW */
@@ -941,9 +932,9 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
struct tc_taprio_qopt_offload *qopt)
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
+ struct netlink_ext_ack *extack = qopt->mqprio.extack;
struct timespec64 time, current_time, qopt_time;
ktime_t current_time_ns;
- bool fpe = false;
int i, ret = 0;
u64 ctr;
@@ -1028,16 +1019,12 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
switch (qopt->entries[i].command) {
case TC_TAPRIO_CMD_SET_GATES:
- if (fpe)
- return -EINVAL;
break;
case TC_TAPRIO_CMD_SET_AND_HOLD:
gates |= BIT(0);
- fpe = true;
break;
case TC_TAPRIO_CMD_SET_AND_RELEASE:
gates &= ~BIT(0);
- fpe = true;
break;
default:
return -EOPNOTSUPP;
@@ -1068,16 +1055,6 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
tc_taprio_map_maxsdu_txq(priv, qopt);
- if (fpe && !priv->dma_cap.fpesel) {
- mutex_unlock(&priv->est_lock);
- return -EOPNOTSUPP;
- }
-
- /* Actual FPE register configuration will be done after FPE handshake
- * is success.
- */
- priv->plat->fpe_cfg->enable = fpe;
-
ret = stmmac_est_configure(priv, priv, priv->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->est_lock);
@@ -1086,12 +1063,10 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
goto disable;
}
- netdev_info(priv->dev, "configured EST\n");
-
- if (fpe) {
- stmmac_fpe_handshake(priv, true);
- netdev_info(priv->dev, "start FPE handshake\n");
- }
+ ret = stmmac_fpe_map_preemption_class(priv, priv->dev, extack,
+ qopt->mqprio.preemptible_tcs);
+ if (ret)
+ goto disable;
return 0;
@@ -1109,16 +1084,7 @@ disable:
mutex_unlock(&priv->est_lock);
}
- priv->plat->fpe_cfg->enable = false;
- stmmac_fpe_configure(priv, priv->ioaddr,
- priv->plat->fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use,
- false);
- netdev_info(priv->dev, "disabled FPE\n");
-
- stmmac_fpe_handshake(priv, false);
- netdev_info(priv->dev, "stop FPE handshake\n");
+ stmmac_fpe_map_preemption_class(priv, priv->dev, extack, 0);
return ret;
}
@@ -1174,6 +1140,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
return err;
}
+static int tc_setup_taprio_without_fpe(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ if (!qopt->mqprio.preemptible_tcs)
+ return tc_setup_taprio(priv, qopt);
+
+ NL_SET_ERR_MSG_MOD(qopt->mqprio.extack,
+ "taprio with FPE is not implemented for this MAC");
+
+ return -EOPNOTSUPP;
+}
+
static int tc_setup_etf(struct stmmac_priv *priv,
struct tc_etf_qopt_offload *qopt)
{
@@ -1198,6 +1176,13 @@ static int tc_query_caps(struct stmmac_priv *priv,
struct tc_query_caps_base *base)
{
switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
@@ -1214,6 +1199,81 @@ static int tc_query_caps(struct stmmac_priv *priv,
}
}
+static void stmmac_reset_tc_mqprio(struct net_device *ndev,
+ struct netlink_ext_ack *extack)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ netdev_reset_tc(ndev);
+ netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
+ stmmac_fpe_map_preemption_class(priv, ndev, extack, 0);
+}
+
+static int tc_setup_dwmac510_mqprio(struct stmmac_priv *priv,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct netlink_ext_ack *extack = mqprio->extack;
+ struct tc_mqprio_qopt *qopt = &mqprio->qopt;
+ u32 offset, count, num_stack_tx_queues = 0;
+ struct net_device *ndev = priv->dev;
+ u32 num_tc = qopt->num_tc;
+ int err;
+
+ if (!num_tc) {
+ stmmac_reset_tc_mqprio(ndev, extack);
+ return 0;
+ }
+
+ err = netdev_set_num_tc(ndev, num_tc);
+ if (err)
+ return err;
+
+ for (u32 tc = 0; tc < num_tc; tc++) {
+ offset = qopt->offset[tc];
+ count = qopt->count[tc];
+ num_stack_tx_queues += count;
+
+ err = netdev_set_tc_queue(ndev, tc, count, offset);
+ if (err)
+ goto err_reset_tc;
+ }
+
+ err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ if (err)
+ goto err_reset_tc;
+
+ err = stmmac_fpe_map_preemption_class(priv, ndev, extack,
+ mqprio->preemptible_tcs);
+ if (err)
+ goto err_reset_tc;
+
+ return 0;
+
+err_reset_tc:
+ stmmac_reset_tc_mqprio(ndev, extack);
+
+ return err;
+}
+
+static int tc_setup_mqprio_unimplemented(struct stmmac_priv *priv,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ NL_SET_ERR_MSG_MOD(mqprio->extack,
+ "mqprio HW offload is not implemented for this MAC");
+ return -EOPNOTSUPP;
+}
+
+const struct stmmac_tc_ops dwmac4_tc_ops = {
+ .init = tc_init,
+ .setup_cls_u32 = tc_setup_cls_u32,
+ .setup_cbs = tc_setup_cbs,
+ .setup_cls = tc_setup_cls,
+ .setup_taprio = tc_setup_taprio_without_fpe,
+ .setup_etf = tc_setup_etf,
+ .query_caps = tc_query_caps,
+ .setup_mqprio = tc_setup_mqprio_unimplemented,
+};
+
const struct stmmac_tc_ops dwmac510_tc_ops = {
.init = tc_init,
.setup_cls_u32 = tc_setup_cls_u32,
@@ -1222,4 +1282,16 @@ const struct stmmac_tc_ops dwmac510_tc_ops = {
.setup_taprio = tc_setup_taprio,
.setup_etf = tc_setup_etf,
.query_caps = tc_query_caps,
+ .setup_mqprio = tc_setup_dwmac510_mqprio,
+};
+
+const struct stmmac_tc_ops dwxgmac_tc_ops = {
+ .init = tc_init,
+ .setup_cls_u32 = tc_setup_cls_u32,
+ .setup_cbs = tc_setup_cbs,
+ .setup_cls = tc_setup_cls,
+ .setup_taprio = tc_setup_taprio_without_fpe,
+ .setup_etf = tc_setup_etf,
+ .query_caps = tc_query_caps,
+ .setup_mqprio = tc_setup_mqprio_unimplemented,
};
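The new setup_mqprio hooks are exercised through the mqprio qdisc offload path. Assuming an iproute2 with frame-preemption support (the fp keyword) and a 4-queue dwmac510 device, a hedged example that maps four traffic classes and marks TC0 preemptible while keeping the others express:

    tc qdisc replace dev eth0 parent root handle 100 mqprio \
            num_tc 4 map 0 1 2 3 0 0 0 0 \
            queues 1@0 1@1 1@2 1@3 \
            fp P E E E hw 1

On dwmac4/xgmac cores this request lands in tc_setup_mqprio_unimplemented() and is rejected with the extack message above; on dwmac510 it flows through netdev_set_num_tc()/netdev_set_tc_queue() and then stmmac_fpe_map_preemption_class().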
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 2f30715e9b67..1e887d951a04 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -114,37 +114,23 @@ static void vnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct vnet *vp = (struct vnet *)netdev_priv(dev);
struct vnet_port *port;
- char *p = (char *)buf;
switch (stringset) {
case ETH_SS_STATS:
memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
- p += sizeof(ethtool_stats_keys);
+ buf += sizeof(ethtool_stats_keys);
rcu_read_lock();
list_for_each_entry_rcu(port, &vp->port_list, list) {
- snprintf(p, ETH_GSTRING_LEN, "p%u.%s-%pM",
- port->q_index, port->switch_port ? "s" : "q",
- port->raddr);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.rx_packets",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.tx_packets",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.rx_bytes",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.tx_bytes",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.event_up",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.event_reset",
- port->q_index);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&buf, "p%u.%s-%pM", port->q_index,
+ port->switch_port ? "s" : "q",
+ port->raddr);
+ ethtool_sprintf(&buf, "p%u.rx_packets", port->q_index);
+ ethtool_sprintf(&buf, "p%u.tx_packets", port->q_index);
+ ethtool_sprintf(&buf, "p%u.rx_bytes", port->q_index);
+ ethtool_sprintf(&buf, "p%u.tx_bytes", port->q_index);
+ ethtool_sprintf(&buf, "p%u.event_up", port->q_index);
+ ethtool_sprintf(&buf, "p%u.event_reset", port->q_index);
}
rcu_read_unlock();
break;
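The sunvnet conversion above leans on ethtool_sprintf() advancing the string buffer by one fixed-size slot per call, which is what makes the hand-maintained pointer arithmetic removable. A minimal userspace model of that helper's behavior (the kernel version is equivalent modulo types):

    #include <stdarg.h>
    #include <stdio.h>

    #define ETH_GSTRING_LEN 32

    /* Model of ethtool_sprintf(): format into the current slot, then
     * advance the caller's cursor by exactly one string slot. */
    static void ethtool_sprintf_model(char **data, const char *fmt, ...)
    {
            va_list args;

            va_start(args, fmt);
            vsnprintf(*data, ETH_GSTRING_LEN, fmt, args);
            va_end(args);

            *data += ETH_GSTRING_LEN;
    }

    int main(void)
    {
            char buf[2 * ETH_GSTRING_LEN] = { 0 };
            char *p = buf;

            ethtool_sprintf_model(&p, "p%u.rx_packets", 0U);
            ethtool_sprintf_model(&p, "p%u.tx_packets", 0U);
            printf("%s / %s\n", buf, buf + ETH_GSTRING_LEN);
            return 0;
    }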
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index ede5f7890fb4..fc77f424f90b 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1671,7 +1671,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
#endif
#ifdef BDX_LLTX
- netif_trans_update(ndev); /* NETIF_F_LLTX driver :( */
+ netif_trans_update(ndev); /* dev->lltx driver :( */
#endif
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
@@ -2019,7 +2019,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* set multicast list callback has to use priv->tx_lock.
*/
#ifdef BDX_LLTX
- ndev->features |= NETIF_F_LLTX;
+ ndev->lltx = true;
#endif
/* MTU range: 60 - 16384 */
ndev->min_mtu = ETH_ZLEN;
diff --git a/drivers/net/ethernet/tehuti/tehuti.h b/drivers/net/ethernet/tehuti/tehuti.h
index 909e7296cecf..47a2d3e5f8ed 100644
--- a/drivers/net/ethernet/tehuti/tehuti.h
+++ b/drivers/net/ethernet/tehuti/tehuti.h
@@ -260,7 +260,7 @@ struct bdx_priv {
int tx_update_mark;
int tx_noupd;
#endif
- spinlock_t tx_lock; /* NETIF_F_LLTX mode */
+ spinlock_t tx_lock; /* dev->lltx mode */
/* rarely used */
u8 port;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index b60976947da5..9032444435e9 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -427,9 +427,9 @@ static void am65_cpsw_get_channels(struct net_device *ndev,
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- ch->max_rx = AM65_CPSW_MAX_RX_QUEUES;
- ch->max_tx = AM65_CPSW_MAX_TX_QUEUES;
- ch->rx_count = AM65_CPSW_MAX_RX_QUEUES;
+ ch->max_rx = AM65_CPSW_MAX_QUEUES;
+ ch->max_tx = AM65_CPSW_MAX_QUEUES;
+ ch->rx_count = common->rx_ch_num_flows;
ch->tx_count = common->tx_ch_num;
}
@@ -447,9 +447,8 @@ static int am65_cpsw_set_channels(struct net_device *ndev,
if (common->usage_count)
return -EBUSY;
- am65_cpsw_nuss_remove_tx_chns(common);
-
- return am65_cpsw_nuss_update_tx_chns(common, chs->tx_count);
+ return am65_cpsw_nuss_update_tx_rx_chns(common, chs->tx_count,
+ chs->rx_count);
}
static void
@@ -714,8 +713,6 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = am65_cpts_phc_index(common->cpts);
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
@@ -915,80 +912,64 @@ static void am65_cpsw_get_mm_stats(struct net_device *ndev,
s->MACMergeHoldCount = readl(base + AM65_CPSW_STATN_IET_TX_HOLD);
}
-static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
- struct kernel_ethtool_coalesce *kernel_coal,
- struct netlink_ext_ack *extack)
-{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- struct am65_cpsw_tx_chn *tx_chn;
-
- tx_chn = &common->tx_chns[0];
-
- coal->rx_coalesce_usecs = common->rx_pace_timeout / 1000;
- coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
-
- return 0;
-}
-
static int am65_cpsw_get_per_queue_coalesce(struct net_device *ndev, u32 queue,
struct ethtool_coalesce *coal)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_rx_flow *rx_flow;
struct am65_cpsw_tx_chn *tx_chn;
- if (queue >= AM65_CPSW_MAX_TX_QUEUES)
+ if (queue >= AM65_CPSW_MAX_QUEUES)
return -EINVAL;
tx_chn = &common->tx_chns[queue];
-
coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
+ rx_flow = &common->rx_chns.flows[queue];
+ coal->rx_coalesce_usecs = rx_flow->rx_pace_timeout / 1000;
+
return 0;
}
-static int am65_cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- struct am65_cpsw_tx_chn *tx_chn;
-
- tx_chn = &common->tx_chns[0];
-
- if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20)
- return -EINVAL;
-
- if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
- return -EINVAL;
-
- common->rx_pace_timeout = coal->rx_coalesce_usecs * 1000;
- tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
-
- return 0;
+ return am65_cpsw_get_per_queue_coalesce(ndev, 0, coal);
}
static int am65_cpsw_set_per_queue_coalesce(struct net_device *ndev, u32 queue,
struct ethtool_coalesce *coal)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_rx_flow *rx_flow;
struct am65_cpsw_tx_chn *tx_chn;
- if (queue >= AM65_CPSW_MAX_TX_QUEUES)
+ if (queue >= AM65_CPSW_MAX_QUEUES)
return -EINVAL;
tx_chn = &common->tx_chns[queue];
-
- if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20) {
- dev_info(common->dev, "defaulting to min value of 20us for tx-usecs for tx-%u\n",
- queue);
- coal->tx_coalesce_usecs = 20;
- }
+ if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
+ return -EINVAL;
tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
+ rx_flow = &common->rx_chns.flows[queue];
+ if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20)
+ return -EINVAL;
+
+ rx_flow->rx_pace_timeout = coal->rx_coalesce_usecs * 1000;
+
return 0;
}
+static int am65_cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ return am65_cpsw_set_per_queue_coalesce(ndev, 0, coal);
+}
+
const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.begin = am65_cpsw_ethtool_op_begin,
.complete = am65_cpsw_ethtool_op_complete,
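After this refactor the global coalesce handlers are thin wrappers over queue 0, so per-queue interrupt pacing becomes the primary interface. Assuming an ethtool with per-queue sub-command support (queue_mask 0x1 selects queue 0 here):

    ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 50 tx-usecs 50
    ethtool --per-queue eth0 queue_mask 0x1 --show-coalesce

Note that non-zero values below 20us are now rejected with -EINVAL rather than silently clamped to the minimum, per the change above.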
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 81d9f21086ec..0520e9f4bea7 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -138,7 +138,7 @@
AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN)
#define AM65_CPSW_ALE_AGEOUT_DEFAULT 30
-/* Number of TX/RX descriptors */
+/* Number of TX/RX descriptors per channel/flow */
#define AM65_CPSW_MAX_TX_DESC 500
#define AM65_CPSW_MAX_RX_DESC 500
@@ -150,18 +150,20 @@
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
#define AM65_CPSW_DEFAULT_TX_CHNS 8
+#define AM65_CPSW_DEFAULT_RX_CHN_FLOWS 1
/* CPPI streaming packet interface */
#define AM65_CPSW_CPPI_TX_FLOW_ID 0x3FFF
#define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
/* XDP */
-#define AM65_CPSW_XDP_CONSUMED 2
-#define AM65_CPSW_XDP_REDIRECT 1
+#define AM65_CPSW_XDP_CONSUMED BIT(1)
+#define AM65_CPSW_XDP_REDIRECT BIT(0)
#define AM65_CPSW_XDP_PASS 0
/* Include headroom compatible with both skb and xdpf */
-#define AM65_CPSW_HEADROOM (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+#define AM65_CPSW_HEADROOM_NA (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+#define AM65_CPSW_HEADROOM ALIGN(AM65_CPSW_HEADROOM_NA, sizeof(long))
static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
const u8 *dev_addr)
@@ -330,7 +332,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
}
static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
- struct page *page)
+ struct page *page, u32 flow_idx)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct cppi5_host_desc_t *desc_rx;
@@ -363,7 +365,8 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
swdata = cppi5_hdesc_get_swdata(desc_rx);
*((void **)swdata) = page_address(page);
- return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx,
+ desc_rx, desc_dma);
}
void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
@@ -398,22 +401,27 @@ static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct am65_cpsw_rx_flow *flow;
struct xdp_rxq_info *rxq;
- int i;
+ int id, port;
- for (i = 0; i < common->port_num; i++) {
- if (!common->ports[i].ndev)
- continue;
+ for (id = 0; id < common->rx_ch_num_flows; id++) {
+ flow = &rx_chn->flows[id];
- rxq = &common->ports[i].xdp_rxq;
+ for (port = 0; port < common->port_num; port++) {
+ if (!common->ports[port].ndev)
+ continue;
- if (xdp_rxq_info_is_reg(rxq))
- xdp_rxq_info_unreg(rxq);
- }
+ rxq = &common->ports[port].xdp_rxq[id];
+
+ if (xdp_rxq_info_is_reg(rxq))
+ xdp_rxq_info_unreg(rxq);
+ }
- if (rx_chn->page_pool) {
- page_pool_destroy(rx_chn->page_pool);
- rx_chn->page_pool = NULL;
+ if (flow->page_pool) {
+ page_pool_destroy(flow->page_pool);
+ flow->page_pool = NULL;
+ }
}
}
@@ -427,31 +435,44 @@ static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
.nid = dev_to_node(common->dev),
.dev = common->dev,
.dma_dir = DMA_BIDIRECTIONAL,
- .napi = &common->napi_rx,
+ /* .napi set dynamically */
};
+ struct am65_cpsw_rx_flow *flow;
struct xdp_rxq_info *rxq;
struct page_pool *pool;
- int i, ret;
-
- pool = page_pool_create(&pp_params);
- if (IS_ERR(pool))
- return PTR_ERR(pool);
+ int id, port, ret;
+
+ for (id = 0; id < common->rx_ch_num_flows; id++) {
+ flow = &rx_chn->flows[id];
+ pp_params.napi = &flow->napi_rx;
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool)) {
+ ret = PTR_ERR(pool);
+ goto err;
+ }
- rx_chn->page_pool = pool;
+ flow->page_pool = pool;
- for (i = 0; i < common->port_num; i++) {
- if (!common->ports[i].ndev)
- continue;
+ /* Sharing one page pool per flow across ndevs is safe because
+ * their rx handlers never run simultaneously.
+ */
+ for (port = 0; port < common->port_num; port++) {
+ if (!common->ports[port].ndev)
+ continue;
- rxq = &common->ports[i].xdp_rxq;
+ rxq = &common->ports[port].xdp_rxq[id];
- ret = xdp_rxq_info_reg(rxq, common->ports[i].ndev, i, 0);
- if (ret)
- goto err;
+ ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
+ id, flow->napi_rx.napi_id);
+ if (ret)
+ goto err;
- ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
- if (ret)
- goto err;
+ ret = xdp_rxq_info_reg_mem_model(rxq,
+ MEM_TYPE_PAGE_POOL,
+ pool);
+ if (ret)
+ goto err;
+ }
}
return 0;
@@ -496,25 +517,27 @@ static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_ch
desc_idx);
}
-static inline void am65_cpsw_put_page(struct am65_cpsw_rx_chn *rx_chn,
+static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
struct page *page,
bool allow_direct,
int desc_idx)
{
- page_pool_put_full_page(rx_chn->page_pool, page, allow_direct);
- rx_chn->pages[desc_idx] = NULL;
+ page_pool_put_full_page(flow->page_pool, page, allow_direct);
+ flow->pages[desc_idx] = NULL;
}
static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
{
- struct am65_cpsw_rx_chn *rx_chn = data;
+ struct am65_cpsw_rx_flow *flow = data;
struct cppi5_host_desc_t *desc_rx;
+ struct am65_cpsw_rx_chn *rx_chn;
dma_addr_t buf_dma;
u32 buf_dma_len;
void *page_addr;
void **swdata;
int desc_idx;
+ rx_chn = &flow->common->rx_chns;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
page_addr = *swdata;
@@ -525,7 +548,7 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
rx_chn->dsize_log2);
- am65_cpsw_put_page(rx_chn, virt_to_page(page_addr), false, desc_idx);
+ am65_cpsw_put_page(flow, virt_to_page(page_addr), false, desc_idx);
}
static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
@@ -601,7 +624,8 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
struct am65_cpsw_host *host_p = am65_common_get_host(common);
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
- int port_idx, i, ret, tx;
+ int port_idx, i, ret, tx, flow_idx;
+ struct am65_cpsw_rx_flow *flow;
u32 val, port_mask;
struct page *page;
@@ -669,27 +693,26 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
return ret;
}
- for (i = 0; i < rx_chn->descs_num; i++) {
- page = page_pool_dev_alloc_pages(rx_chn->page_pool);
- if (!page) {
- ret = -ENOMEM;
- if (i)
+ for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
+ flow = &rx_chn->flows[flow_idx];
+ for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
+ page = page_pool_dev_alloc_pages(flow->page_pool);
+ if (!page) {
+ dev_err(common->dev, "cannot allocate page in flow %d\n",
+ flow_idx);
+ ret = -ENOMEM;
goto fail_rx;
+ }
+ flow->pages[i] = page;
- return ret;
- }
- rx_chn->pages[i] = page;
-
- ret = am65_cpsw_nuss_rx_push(common, page);
- if (ret < 0) {
- dev_err(common->dev,
- "cannot submit page to channel rx: %d\n",
- ret);
- am65_cpsw_put_page(rx_chn, page, false, i);
- if (i)
+ ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
+ if (ret < 0) {
+ dev_err(common->dev,
+ "cannot submit page to rx channel flow %d, error %d\n",
+ flow_idx, ret);
+ am65_cpsw_put_page(flow, page, false, i);
goto fail_rx;
-
- return ret;
+ }
}
}
@@ -699,6 +722,14 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
goto fail_rx;
}
+ for (i = 0; i < common->rx_ch_num_flows ; i++) {
+ napi_enable(&rx_chn->flows[i].napi_rx);
+ if (rx_chn->flows[i].irq_disabled) {
+ rx_chn->flows[i].irq_disabled = false;
+ enable_irq(rx_chn->flows[i].irq);
+ }
+ }
+
for (tx = 0; tx < common->tx_ch_num; tx++) {
ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
if (ret) {
@@ -710,12 +741,6 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
napi_enable(&tx_chn[tx].napi_tx);
}
- napi_enable(&common->napi_rx);
- if (common->rx_irq_disabled) {
- common->rx_irq_disabled = false;
- enable_irq(rx_chn->irq);
- }
-
dev_dbg(common->dev, "cpsw_nuss started\n");
return 0;
@@ -726,11 +751,24 @@ fail_tx:
tx--;
}
+ for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
+ flow = &rx_chn->flows[flow_idx];
+ if (!flow->irq_disabled) {
+ disable_irq(flow->irq);
+ flow->irq_disabled = true;
+ }
+ napi_disable(&flow->napi_rx);
+ }
+
k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
fail_rx:
- k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, 0, rx_chn,
- am65_cpsw_nuss_rx_cleanup, 0);
+ for (i = 0; i < common->rx_ch_num_flows; i++)
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
+ am65_cpsw_nuss_rx_cleanup, 0);
+
+ am65_cpsw_destroy_xdp_rxqs(common);
+
return ret;
}
@@ -779,12 +817,12 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
dev_err(common->dev, "rx teardown timeout\n");
}
- napi_disable(&common->napi_rx);
- hrtimer_cancel(&common->rx_hrtimer);
-
- for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
- k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
- am65_cpsw_nuss_rx_cleanup, !!i);
+ for (i = 0; i < common->rx_ch_num_flows; i++) {
+ napi_disable(&rx_chn->flows[i].napi_rx);
+ hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
+ am65_cpsw_nuss_rx_cleanup, 0);
+ }
k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
@@ -793,10 +831,6 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
- for (i = 0; i < rx_chn->descs_num; i++) {
- if (rx_chn->pages[i])
- am65_cpsw_put_page(rx_chn, rx_chn->pages[i], false, i);
- }
am65_cpsw_destroy_xdp_rxqs(common);
dev_dbg(common->dev, "cpsw_nuss stopped\n");
@@ -867,7 +901,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
goto runtime_put;
}
- ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
+ ret = netif_set_real_num_rx_queues(ndev, common->rx_ch_num_flows);
if (ret) {
dev_err(common->dev, "cannot set real number of rx queues\n");
goto runtime_put;
@@ -933,7 +967,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
if (unlikely(!host_desc)) {
ndev->stats.tx_dropped++;
- return -ENOMEM;
+ return AM65_CPSW_XDP_CONSUMED; /* drop */
}
am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type);
@@ -942,7 +976,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
pkt_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) {
ndev->stats.tx_dropped++;
- ret = -ENOMEM;
+ ret = AM65_CPSW_XDP_CONSUMED; /* drop */
goto pool_free;
}
@@ -977,6 +1011,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
/* Inform BQL */
netdev_tx_completed_queue(netif_txq, 1, pkt_len);
ndev->stats.tx_errors++;
+ ret = AM65_CPSW_XDP_CONSUMED; /* drop */
goto dma_unmap;
}
@@ -990,13 +1025,15 @@ pool_free:
return ret;
}
-static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
+static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
struct am65_cpsw_port *port,
struct xdp_buff *xdp,
int desc_idx, int cpu, int *len)
{
- struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct am65_cpsw_common *common = flow->common;
+ struct am65_cpsw_ndev_priv *ndev_priv;
struct net_device *ndev = port->ndev;
+ struct am65_cpsw_ndev_stats *stats;
int ret = AM65_CPSW_XDP_CONSUMED;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
@@ -1004,6 +1041,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
struct bpf_prog *prog;
struct page *page;
u32 act;
+ int err;
prog = READ_ONCE(port->xdp_prog);
if (!prog)
@@ -1013,41 +1051,49 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
/* XDP prog might have changed packet data and boundaries */
*len = xdp->data_end - xdp->data;
+ ndev_priv = netdev_priv(ndev);
+ stats = this_cpu_ptr(ndev_priv->stats);
+
switch (act) {
case XDP_PASS:
ret = AM65_CPSW_XDP_PASS;
goto out;
case XDP_TX:
- tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES];
+ tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES];
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
- break;
+ goto drop;
__netif_tx_lock(netif_txq, cpu);
- ret = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
+ err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
AM65_CPSW_TX_BUF_TYPE_XDP_TX);
__netif_tx_unlock(netif_txq);
- if (ret)
- break;
+ if (err)
+ goto drop;
- ndev->stats.rx_bytes += *len;
- ndev->stats.rx_packets++;
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_bytes += *len;
+ stats->rx_packets++;
+ u64_stats_update_end(&stats->syncp);
ret = AM65_CPSW_XDP_CONSUMED;
goto out;
case XDP_REDIRECT:
if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
- break;
+ goto drop;
- ndev->stats.rx_bytes += *len;
- ndev->stats.rx_packets++;
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_bytes += *len;
+ stats->rx_packets++;
+ u64_stats_update_end(&stats->syncp);
ret = AM65_CPSW_XDP_REDIRECT;
goto out;
default:
bpf_warn_invalid_xdp_action(ndev, prog, act);
fallthrough;
case XDP_ABORTED:
+drop:
trace_xdp_exception(ndev, prog, act);
fallthrough;
case XDP_DROP:
@@ -1055,7 +1101,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
}
page = virt_to_head_page(xdp->data);
- am65_cpsw_put_page(rx_chn, page, true, desc_idx);
+ am65_cpsw_put_page(flow, page, true, desc_idx);
out:
return ret;
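
The stats updates above move from the shared ndev->stats counters to
per-CPU counters, avoiding cross-CPU cacheline contention in the XDP hot
path. A self-contained sketch of the u64_stats pattern being used (the
struct here is illustrative, not the driver's):

	#include <linux/percpu.h>
	#include <linux/u64_stats_sync.h>

	/* Illustrative per-CPU stats block, mirroring the shape of
	 * am65_cpsw_ndev_stats.
	 */
	struct demo_pcpu_stats {
		u64 rx_packets;
		u64 rx_bytes;
		struct u64_stats_sync syncp;
	};

	static void demo_count_rx(struct demo_pcpu_stats __percpu *pcpu, u32 len)
	{
		struct demo_pcpu_stats *s = this_cpu_ptr(pcpu);

		/* Writer side: the seqcount lets 64-bit readers on 32-bit
		 * systems detect torn reads and retry.
		 */
		u64_stats_update_begin(&s->syncp);
		s->rx_packets++;
		s->rx_bytes += len;
		u64_stats_update_end(&s->syncp);
	}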
@@ -1094,11 +1140,12 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
}
}
-static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
- u32 flow_idx, int cpu)
+static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
+ int cpu, int *xdp_state)
{
- struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
+ struct am65_cpsw_common *common = flow->common;
struct am65_cpsw_ndev_priv *ndev_priv;
struct am65_cpsw_ndev_stats *stats;
struct cppi5_host_desc_t *desc_rx;
@@ -1108,12 +1155,14 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
struct am65_cpsw_port *port;
int headroom, desc_idx, ret;
struct net_device *ndev;
+ u32 flow_idx = flow->id;
struct sk_buff *skb;
struct xdp_buff xdp;
void *page_addr;
void **swdata;
u32 *psdata;
+ *xdp_state = AM65_CPSW_XDP_PASS;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
if (ret) {
if (ret != -ENODATA)
@@ -1161,15 +1210,13 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
}
if (port->xdp_prog) {
- xdp_init_buff(&xdp, AM65_CPSW_MAX_PACKET_SIZE, &port->xdp_rxq);
-
- xdp_prepare_buff(&xdp, page_addr, skb_headroom(skb),
+ xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
+ xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
pkt_len, false);
-
- ret = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
- cpu, &pkt_len);
- if (ret != AM65_CPSW_XDP_PASS)
- return ret;
+ *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, desc_idx,
+ cpu, &pkt_len);
+ if (*xdp_state != AM65_CPSW_XDP_PASS)
+ goto allocate;
/* Compute additional headroom to be reserved */
headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
@@ -1184,7 +1231,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
skb_mark_for_recycle(skb);
skb->protocol = eth_type_trans(skb, ndev);
am65_cpsw_nuss_rx_csum(skb, csum_info);
- napi_gro_receive(&common->napi_rx, skb);
+ napi_gro_receive(&flow->napi_rx, skb);
stats = this_cpu_ptr(ndev_priv->stats);
@@ -1193,21 +1240,25 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
stats->rx_bytes += pkt_len;
u64_stats_update_end(&stats->syncp);
- new_page = page_pool_dev_alloc_pages(rx_chn->page_pool);
- if (unlikely(!new_page))
+allocate:
+ new_page = page_pool_dev_alloc_pages(flow->page_pool);
+ if (unlikely(!new_page)) {
+ dev_err(dev, "page alloc failed\n");
return -ENOMEM;
- rx_chn->pages[desc_idx] = new_page;
+ }
+
+ flow->pages[desc_idx] = new_page;
if (netif_dormant(ndev)) {
- am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
+ am65_cpsw_put_page(flow, new_page, true, desc_idx);
ndev->stats.rx_dropped++;
return 0;
}
requeue:
- ret = am65_cpsw_nuss_rx_push(common, new_page);
+ ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);
if (WARN_ON(ret < 0)) {
- am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
+ am65_cpsw_put_page(flow, new_page, true, desc_idx);
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
}
@@ -1217,54 +1268,48 @@ requeue:
static enum hrtimer_restart am65_cpsw_nuss_rx_timer_callback(struct hrtimer *timer)
{
- struct am65_cpsw_common *common =
- container_of(timer, struct am65_cpsw_common, rx_hrtimer);
+ struct am65_cpsw_rx_flow *flow = container_of(timer,
+ struct am65_cpsw_rx_flow,
+ rx_hrtimer);
- enable_irq(common->rx_chns.irq);
+ enable_irq(flow->irq);
return HRTIMER_NORESTART;
}
static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
- struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
- int flow = AM65_CPSW_MAX_RX_FLOWS;
+ struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx);
+ struct am65_cpsw_common *common = flow->common;
int cpu = smp_processor_id();
- bool xdp_redirect = false;
+ int xdp_state_or = 0;
int cur_budget, ret;
+ int xdp_state;
int num_rx = 0;
- /* process every flow */
- while (flow--) {
- cur_budget = budget - num_rx;
-
- while (cur_budget--) {
- ret = am65_cpsw_nuss_rx_packets(common, flow, cpu);
- if (ret) {
- if (ret == AM65_CPSW_XDP_REDIRECT)
- xdp_redirect = true;
- break;
- }
- num_rx++;
- }
-
- if (num_rx >= budget)
+ /* process only this flow */
+ cur_budget = budget;
+ while (cur_budget--) {
+ ret = am65_cpsw_nuss_rx_packets(flow, cpu, &xdp_state);
+ xdp_state_or |= xdp_state;
+ if (ret)
break;
+ num_rx++;
}
- if (xdp_redirect)
+ if (xdp_state_or & AM65_CPSW_XDP_REDIRECT)
xdp_do_flush();
dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
- if (common->rx_irq_disabled) {
- common->rx_irq_disabled = false;
- if (unlikely(common->rx_pace_timeout)) {
- hrtimer_start(&common->rx_hrtimer,
- ns_to_ktime(common->rx_pace_timeout),
+ if (flow->irq_disabled) {
+ flow->irq_disabled = false;
+ if (unlikely(flow->rx_pace_timeout)) {
+ hrtimer_start(&flow->rx_hrtimer,
+ ns_to_ktime(flow->rx_pace_timeout),
HRTIMER_MODE_REL_PINNED);
} else {
- enable_irq(common->rx_chns.irq);
+ enable_irq(flow->irq);
}
}
}
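
Interrupt pacing is now per-flow state as well (irq_disabled, rx_hrtimer,
rx_pace_timeout): after a completed poll the IRQ stays masked for
rx_pace_timeout nanoseconds so a following burst coalesces into one poll.
The scheme in simplified form (field names as in this patch, the helper
itself is illustrative):

	static void demo_rx_poll_done(struct am65_cpsw_rx_flow *flow)
	{
		if (!flow->rx_pace_timeout) {
			enable_irq(flow->irq);	/* no pacing: unmask now */
			return;
		}

		/* Pacing: defer the unmask; the hrtimer callback calls
		 * enable_irq() once the quiet period expires.
		 */
		hrtimer_start(&flow->rx_hrtimer,
			      ns_to_ktime(flow->rx_pace_timeout),
			      HRTIMER_MODE_REL_PINNED);
	}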
@@ -1512,11 +1557,11 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
{
- struct am65_cpsw_common *common = dev_id;
+ struct am65_cpsw_rx_flow *flow = dev_id;
- common->rx_irq_disabled = true;
+ flow->irq_disabled = true;
disable_irq_nosync(irq);
- napi_schedule(&common->napi_rx);
+ napi_schedule(&flow->napi_rx);
return IRQ_HANDLED;
}
@@ -1918,12 +1963,13 @@ static int am65_cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
static int am65_cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
struct xdp_frame **frames, u32 flags)
{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
int cpu = smp_processor_id();
int i, nxmit = 0;
- tx_chn = &am65_ndev_to_common(ndev)->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES];
+ tx_chn = &common->tx_chns[cpu % common->tx_ch_num];
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
__netif_tx_lock(netif_txq, cpu);
@@ -2160,7 +2206,7 @@ static void am65_cpsw_nuss_free_tx_chns(void *data)
}
}
-void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
+static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
int i;
@@ -2175,15 +2221,9 @@ void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
devm_free_irq(dev, tx_chn->irq, tx_chn);
netif_napi_del(&tx_chn->napi_tx);
-
- if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
- k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
-
- if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
- k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
-
- memset(tx_chn, 0, sizeof(*tx_chn));
}
+
+ am65_cpsw_nuss_free_tx_chns(common);
}
static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
@@ -2315,19 +2355,22 @@ static void am65_cpsw_nuss_free_rx_chns(void *data)
k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}
-static void am65_cpsw_nuss_remove_rx_chns(void *data)
+static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common)
{
- struct am65_cpsw_common *common = data;
struct device *dev = common->dev;
struct am65_cpsw_rx_chn *rx_chn;
+ struct am65_cpsw_rx_flow *flows;
+ int i;
rx_chn = &common->rx_chns;
+ flows = rx_chn->flows;
devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
- if (!(rx_chn->irq < 0))
- devm_free_irq(dev, rx_chn->irq, common);
-
- netif_napi_del(&common->napi_rx);
+ for (i = 0; i < common->rx_ch_num_flows; i++) {
+ if (!(flows[i].irq < 0))
+ devm_free_irq(dev, flows[i].irq, &flows[i]);
+ netif_napi_del(&flows[i].napi_rx);
+ }
am65_cpsw_nuss_free_rx_chns(common);
@@ -2340,6 +2383,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
struct device *dev = common->dev;
+ struct am65_cpsw_rx_flow *flow;
u32 hdesc_size, hdesc_size_out;
u32 fdqring_id;
int i, ret = 0;
@@ -2348,12 +2392,21 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
AM65_CPSW_NAV_SW_DATA_SIZE);
rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
- rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
+ rx_cfg.flow_id_num = common->rx_ch_num_flows;
rx_cfg.flow_id_base = common->rx_flow_id_base;
/* init all flows */
rx_chn->dev = dev;
- rx_chn->descs_num = max_desc_num;
+ rx_chn->descs_num = max_desc_num * rx_cfg.flow_id_num;
+
+ for (i = 0; i < common->rx_ch_num_flows; i++) {
+ flow = &rx_chn->flows[i];
+ flow->page_pool = NULL;
+ flow->pages = devm_kcalloc(dev, AM65_CPSW_MAX_RX_DESC,
+ sizeof(*flow->pages), GFP_KERNEL);
+ if (!flow->pages)
+ return -ENOMEM;
+ }
rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
if (IS_ERR(rx_chn->rx_chn)) {
@@ -2376,13 +2429,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
rx_chn->dsize_log2 = __fls(hdesc_size_out);
WARN_ON(hdesc_size_out != (1 << rx_chn->dsize_log2));
- rx_chn->page_pool = NULL;
-
- rx_chn->pages = devm_kcalloc(dev, rx_chn->descs_num,
- sizeof(*rx_chn->pages), GFP_KERNEL);
- if (!rx_chn->pages)
- return -ENOMEM;
-
common->rx_flow_id_base =
k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
@@ -2406,6 +2452,10 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
};
+ flow = &rx_chn->flows[i];
+ flow->id = i;
+ flow->common = common;
+
rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
rx_flow_cfg.rx_cfg.size = max_desc_num;
rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
@@ -2422,30 +2472,37 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
i);
- rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
-
- if (rx_chn->irq < 0) {
+ flow->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+ if (flow->irq <= 0) {
dev_err(dev, "Failed to get rx dma irq %d\n",
- rx_chn->irq);
- ret = rx_chn->irq;
+ flow->irq);
+ ret = flow->irq;
goto err;
}
- }
-
- netif_napi_add(common->dma_ndev, &common->napi_rx,
- am65_cpsw_nuss_rx_poll);
- hrtimer_init(&common->rx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- common->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
- ret = devm_request_irq(dev, rx_chn->irq,
- am65_cpsw_nuss_rx_irq,
- IRQF_TRIGGER_HIGH, dev_name(dev), common);
- if (ret) {
- dev_err(dev, "failure requesting rx irq %u, %d\n",
- rx_chn->irq, ret);
- goto err;
+ snprintf(flow->name,
+ sizeof(flow->name), "%s-rx%d",
+ dev_name(dev), i);
+ netif_napi_add(common->dma_ndev, &flow->napi_rx,
+ am65_cpsw_nuss_rx_poll);
+ hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
+
+ ret = devm_request_irq(dev, flow->irq,
+ am65_cpsw_nuss_rx_irq,
+ IRQF_TRIGGER_HIGH,
+ flow->name, flow);
+ if (ret) {
+ dev_err(dev, "failure requesting rx %d irq %u, %d\n",
+ i, flow->irq, ret);
+ goto err;
+ }
}
+ /* setup classifier to route priorities to flows */
+ cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows);
+
err:
i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
if (i) {
@@ -2687,10 +2744,9 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
return 0;
/* alloc netdev */
- port->ndev = devm_alloc_etherdev_mqs(common->dev,
- sizeof(struct am65_cpsw_ndev_priv),
- AM65_CPSW_MAX_TX_QUEUES,
- AM65_CPSW_MAX_RX_QUEUES);
+ port->ndev = alloc_etherdev_mqs(sizeof(struct am65_cpsw_ndev_priv),
+ AM65_CPSW_MAX_QUEUES,
+ AM65_CPSW_MAX_QUEUES);
if (!port->ndev) {
dev_err(dev, "error allocating slave net_device %u\n",
port->port_id);
@@ -2761,7 +2817,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
}
phylink = phylink_create(&port->slave.phylink_config,
- of_node_to_fwnode(port->slave.port_np),
+ of_fwnode_handle(port->slave.port_np),
port->slave.phy_if,
&am65_cpsw_phylink_mac_ops);
if (IS_ERR(phylink))
@@ -2811,8 +2867,12 @@ static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
for (i = 0; i < common->port_num; i++) {
port = &common->ports[i];
- if (port->ndev && port->ndev->reg_state == NETREG_REGISTERED)
+ if (!port->ndev)
+ continue;
+ if (port->ndev->reg_state == NETREG_REGISTERED)
unregister_netdev(port->ndev);
+ free_netdev(port->ndev);
+ port->ndev = NULL;
}
}
@@ -3287,9 +3347,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
}
- for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
- k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
- am65_cpsw_nuss_rx_cleanup, !!i);
+ for (i = 0; i < common->rx_ch_num_flows; i++)
+ k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
+ &rx_chan->flows[i],
+ am65_cpsw_nuss_rx_cleanup, 0);
k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
@@ -3330,12 +3391,21 @@ err_cleanup_ndev:
return ret;
}
-int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
+int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
+ int num_tx, int num_rx)
{
int ret;
+ am65_cpsw_nuss_remove_tx_chns(common);
+ am65_cpsw_nuss_remove_rx_chns(common);
+
common->tx_ch_num = num_tx;
+ common->rx_ch_num_flows = num_rx;
ret = am65_cpsw_nuss_init_tx_chns(common);
+ if (ret)
+ return ret;
+
+ ret = am65_cpsw_nuss_init_rx_chns(common);
return ret;
}
@@ -3465,6 +3535,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
common->rx_flow_id_base = -1;
init_completion(&common->tdown_complete);
common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
+ common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS;
common->pf_p0_rx_ptype_rrobin = false;
common->default_vlan = 1;
@@ -3545,16 +3616,17 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
ret = am65_cpsw_nuss_init_ndevs(common);
if (ret)
- goto err_free_phylink;
+ goto err_ndevs_clear;
ret = am65_cpsw_nuss_register_ndevs(common);
if (ret)
- goto err_free_phylink;
+ goto err_ndevs_clear;
pm_runtime_put(dev);
return 0;
-err_free_phylink:
+err_ndevs_clear:
+ am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpts_release(common->cpts);
err_of_clear:
@@ -3584,13 +3656,13 @@ static void am65_cpsw_nuss_remove(struct platform_device *pdev)
return;
}
- am65_cpsw_unregister_devlink(common);
am65_cpsw_unregister_notifiers(common);
/* must unregister ndevs here because DD release_driver routine calls
* dma_deconfigure(dev) before devres_release_all(dev)
*/
am65_cpsw_nuss_cleanup_ndev(common);
+ am65_cpsw_unregister_devlink(common);
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpts_release(common->cpts);
am65_cpsw_disable_serdes_phy(common);
@@ -3656,8 +3728,10 @@ static int am65_cpsw_nuss_resume(struct device *dev)
return ret;
/* If RX IRQ was disabled before suspend, keep it disabled */
- if (common->rx_irq_disabled)
- disable_irq(common->rx_chns.irq);
+ for (i = 0; i < common->rx_ch_num_flows; i++) {
+ if (common->rx_chns.flows[i].irq_disabled)
+ disable_irq(common->rx_chns.flows[i].irq);
+ }
am65_cpts_resume(common->cpts);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index e2ce2be320bd..dc8d544230dc 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -21,9 +21,7 @@ struct am65_cpts;
#define HOST_PORT_NUM 0
-#define AM65_CPSW_MAX_TX_QUEUES 8
-#define AM65_CPSW_MAX_RX_QUEUES 1
-#define AM65_CPSW_MAX_RX_FLOWS 1
+#define AM65_CPSW_MAX_QUEUES 8 /* both TX & RX */
#define AM65_CPSW_PORT_VLAN_REG_OFFSET 0x014
@@ -58,7 +56,7 @@ struct am65_cpsw_port {
struct am65_cpsw_qos qos;
struct devlink_port devlink_port;
struct bpf_prog *xdp_prog;
- struct xdp_rxq_info xdp_rxq;
+ struct xdp_rxq_info xdp_rxq[AM65_CPSW_MAX_QUEUES];
/* Only for suspend resume context */
u32 vid_context;
};
@@ -94,16 +92,27 @@ struct am65_cpsw_tx_chn {
u32 rate_mbps;
};
+struct am65_cpsw_rx_flow {
+ u32 id;
+ struct napi_struct napi_rx;
+ struct am65_cpsw_common *common;
+ int irq;
+ bool irq_disabled;
+ struct hrtimer rx_hrtimer;
+ unsigned long rx_pace_timeout;
+ struct page_pool *page_pool;
+ struct page **pages;
+ char name[32];
+};
+
struct am65_cpsw_rx_chn {
struct device *dev;
struct device *dma_dev;
struct k3_cppi_desc_pool *desc_pool;
struct k3_udma_glue_rx_channel *rx_chn;
- struct page_pool *page_pool;
- struct page **pages;
u32 descs_num;
unsigned char dsize_log2;
- int irq;
+ struct am65_cpsw_rx_flow flows[AM65_CPSW_MAX_QUEUES];
};
#define AM65_CPSW_QUIRK_I2027_NO_TX_CSUM BIT(0)
@@ -145,16 +154,12 @@ struct am65_cpsw_common {
u32 tx_ch_rate_msk;
u32 rx_flow_id_base;
- struct am65_cpsw_tx_chn tx_chns[AM65_CPSW_MAX_TX_QUEUES];
+ struct am65_cpsw_tx_chn tx_chns[AM65_CPSW_MAX_QUEUES];
struct completion tdown_complete;
atomic_t tdown_cnt;
+ int rx_ch_num_flows;
struct am65_cpsw_rx_chn rx_chns;
- struct napi_struct napi_rx;
-
- bool rx_irq_disabled;
- struct hrtimer rx_hrtimer;
- unsigned long rx_pace_timeout;
u32 nuss_ver;
u32 cpsw_ver;
@@ -203,8 +208,8 @@ struct am65_cpsw_ndev_priv {
#define am65_common_get_host(common) (&(common)->host)
#define am65_common_get_port(common, id) (&(common)->ports[(id) - 1])
-#define am65_cpsw_napi_to_common(pnapi) \
- container_of(pnapi, struct am65_cpsw_common, napi_rx)
+#define am65_cpsw_napi_to_rx_flow(pnapi) \
+ container_of(pnapi, struct am65_cpsw_rx_flow, napi_rx)
#define am65_cpsw_napi_to_tx_chn(pnapi) \
container_of(pnapi, struct am65_cpsw_tx_chn, napi_tx)
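
The rewritten macro relies on napi_struct being embedded in
am65_cpsw_rx_flow: the NAPI core hands back &flow->napi_rx, and
container_of() recovers the enclosing flow by subtracting the member
offset, with no lookup table. A minimal illustration:

	static int demo_rx_poll(struct napi_struct *napi_rx, int budget)
	{
		struct am65_cpsw_rx_flow *flow =
			container_of(napi_rx, struct am65_cpsw_rx_flow, napi_rx);

		/* ... pop descriptors from this flow's ring via flow->id ... */
		return 0;	/* sketch: report no packets processed */
	}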
@@ -215,8 +220,8 @@ struct am65_cpsw_ndev_priv {
extern const struct ethtool_ops am65_cpsw_ethtool_ops_slave;
void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common);
-void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common);
-int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx);
+int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
+ int num_tx, int num_rx);
bool am65_cpsw_port_dev_check(const struct net_device *dev);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 64bf22cd860c..8d02d2b21429 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/err.h>
@@ -45,6 +46,24 @@
#define ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS 0x9C
#define ALE_VLAN_MASK_MUX(reg) (0xc0 + (0x4 * (reg)))
+#define ALE_POLICER_PORT_OUI 0x100
+#define ALE_POLICER_DA_SA 0x104
+#define ALE_POLICER_VLAN 0x108
+#define ALE_POLICER_ETHERTYPE_IPSA 0x10c
+#define ALE_POLICER_IPDA 0x110
+#define ALE_POLICER_PIR 0x118
+#define ALE_POLICER_CIR 0x11c
+#define ALE_POLICER_TBL_CTL 0x120
+#define ALE_POLICER_CTL 0x124
+#define ALE_POLICER_TEST_CTL 0x128
+#define ALE_POLICER_HIT_STATUS 0x12c
+#define ALE_THREAD_DEF 0x134
+#define ALE_THREAD_CTL 0x138
+#define ALE_THREAD_VAL 0x13c
+
+#define ALE_POLICER_TBL_WRITE_ENABLE BIT(31)
+#define ALE_POLICER_TBL_INDEX_MASK GENMASK(4, 0)
+
#define AM65_CPSW_ALE_THREAD_DEF_REG 0x134
/* ALE_AGING_TIMER */
@@ -76,7 +95,8 @@ enum {
* @dev_id: ALE version/SoC id
* @features: features supported by ALE
* @tbl_entries: number of ALE entries
- * @major_ver_mask: mask of ALE Major Version Value in ALE_IDVER reg.
+ * @reg_fields: pointer to array of register field configuration
+ * @num_fields: number of fields in the reg_fields array
* @nu_switch_ale: NU Switch ALE
* @vlan_entry_tbl: ALE vlan entry fields description tbl
*/
@@ -84,7 +104,8 @@ struct cpsw_ale_dev_id {
const char *dev_id;
u32 features;
u32 tbl_entries;
- u32 major_ver_mask;
+ const struct reg_field *reg_fields;
+ int num_fields;
bool nu_switch_ale;
const struct ale_entry_fld *vlan_entry_tbl;
};
@@ -102,7 +123,7 @@ struct cpsw_ale_dev_id {
#define ALE_UCAST_TOUCHED 3
#define ALE_TABLE_SIZE_MULTIPLIER 1024
-#define ALE_STATUS_SIZE_MASK 0x1f
+#define ALE_POLICER_SIZE_MULTIPLIER 8
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
@@ -1292,25 +1313,111 @@ void cpsw_ale_stop(struct cpsw_ale *ale)
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
}
+static const struct reg_field ale_fields_cpsw[] = {
+ /* CPSW_ALE_IDVER_REG */
+ [MINOR_VER] = REG_FIELD(ALE_IDVER, 0, 7),
+ [MAJOR_VER] = REG_FIELD(ALE_IDVER, 8, 15),
+};
+
+static const struct reg_field ale_fields_cpsw_nu[] = {
+ /* CPSW_ALE_IDVER_REG */
+ [MINOR_VER] = REG_FIELD(ALE_IDVER, 0, 7),
+ [MAJOR_VER] = REG_FIELD(ALE_IDVER, 8, 10),
+ /* CPSW_ALE_STATUS_REG */
+ [ALE_ENTRIES] = REG_FIELD(ALE_STATUS, 0, 7),
+ [ALE_POLICERS] = REG_FIELD(ALE_STATUS, 8, 15),
+ /* CPSW_ALE_POLICER_PORT_OUI_REG */
+ [POL_PORT_MEN] = REG_FIELD(ALE_POLICER_PORT_OUI, 31, 31),
+ [POL_TRUNK_ID] = REG_FIELD(ALE_POLICER_PORT_OUI, 30, 30),
+ [POL_PORT_NUM] = REG_FIELD(ALE_POLICER_PORT_OUI, 25, 25),
+ [POL_PRI_MEN] = REG_FIELD(ALE_POLICER_PORT_OUI, 19, 19),
+ [POL_PRI_VAL] = REG_FIELD(ALE_POLICER_PORT_OUI, 16, 18),
+ [POL_OUI_MEN] = REG_FIELD(ALE_POLICER_PORT_OUI, 15, 15),
+ [POL_OUI_INDEX] = REG_FIELD(ALE_POLICER_PORT_OUI, 0, 5),
+
+ /* CPSW_ALE_POLICER_DA_SA_REG */
+ [POL_DST_MEN] = REG_FIELD(ALE_POLICER_DA_SA, 31, 31),
+ [POL_DST_INDEX] = REG_FIELD(ALE_POLICER_DA_SA, 16, 21),
+ [POL_SRC_MEN] = REG_FIELD(ALE_POLICER_DA_SA, 15, 15),
+ [POL_SRC_INDEX] = REG_FIELD(ALE_POLICER_DA_SA, 0, 5),
+
+ /* CPSW_ALE_POLICER_VLAN_REG */
+ [POL_OVLAN_MEN] = REG_FIELD(ALE_POLICER_VLAN, 31, 31),
+ [POL_OVLAN_INDEX] = REG_FIELD(ALE_POLICER_VLAN, 16, 21),
+ [POL_IVLAN_MEN] = REG_FIELD(ALE_POLICER_VLAN, 15, 15),
+ [POL_IVLAN_INDEX] = REG_FIELD(ALE_POLICER_VLAN, 0, 5),
+
+ /* CPSW_ALE_POLICER_ETHERTYPE_IPSA_REG */
+ [POL_ETHERTYPE_MEN] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 31, 31),
+ [POL_ETHERTYPE_INDEX] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 16, 21),
+ [POL_IPSRC_MEN] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 15, 15),
+ [POL_IPSRC_INDEX] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 0, 5),
+
+ /* CPSW_ALE_POLICER_IPDA_REG */
+ [POL_IPDST_MEN] = REG_FIELD(ALE_POLICER_IPDA, 31, 31),
+ [POL_IPDST_INDEX] = REG_FIELD(ALE_POLICER_IPDA, 16, 21),
+
+ /* CPSW_ALE_POLICER_TBL_CTL_REG */
+	/* No REG_FIELD entries are defined for this register, as its
+	 * fields cannot be used independently.
+	 */
+
+ /* CPSW_ALE_POLICER_CTL_REG */
+ [POL_EN] = REG_FIELD(ALE_POLICER_CTL, 31, 31),
+ [POL_RED_DROP_EN] = REG_FIELD(ALE_POLICER_CTL, 29, 29),
+ [POL_YELLOW_DROP_EN] = REG_FIELD(ALE_POLICER_CTL, 28, 28),
+ [POL_YELLOW_THRESH] = REG_FIELD(ALE_POLICER_CTL, 24, 26),
+ [POL_POL_MATCH_MODE] = REG_FIELD(ALE_POLICER_CTL, 22, 23),
+ [POL_PRIORITY_THREAD_EN] = REG_FIELD(ALE_POLICER_CTL, 21, 21),
+ [POL_MAC_ONLY_DEF_DIS] = REG_FIELD(ALE_POLICER_CTL, 20, 20),
+
+ /* CPSW_ALE_POLICER_TEST_CTL_REG */
+ [POL_TEST_CLR] = REG_FIELD(ALE_POLICER_TEST_CTL, 31, 31),
+ [POL_TEST_CLR_RED] = REG_FIELD(ALE_POLICER_TEST_CTL, 30, 30),
+ [POL_TEST_CLR_YELLOW] = REG_FIELD(ALE_POLICER_TEST_CTL, 29, 29),
+ [POL_TEST_CLR_SELECTED] = REG_FIELD(ALE_POLICER_TEST_CTL, 28, 28),
+ [POL_TEST_ENTRY] = REG_FIELD(ALE_POLICER_TEST_CTL, 0, 4),
+
+ /* CPSW_ALE_POLICER_HIT_STATUS_REG */
+ [POL_STATUS_HIT] = REG_FIELD(ALE_POLICER_HIT_STATUS, 31, 31),
+ [POL_STATUS_HIT_RED] = REG_FIELD(ALE_POLICER_HIT_STATUS, 30, 30),
+ [POL_STATUS_HIT_YELLOW] = REG_FIELD(ALE_POLICER_HIT_STATUS, 29, 29),
+
+ /* CPSW_ALE_THREAD_DEF_REG */
+ [ALE_DEFAULT_THREAD_EN] = REG_FIELD(ALE_THREAD_DEF, 15, 15),
+ [ALE_DEFAULT_THREAD_VAL] = REG_FIELD(ALE_THREAD_DEF, 0, 5),
+
+ /* CPSW_ALE_THREAD_CTL_REG */
+ [ALE_THREAD_CLASS_INDEX] = REG_FIELD(ALE_THREAD_CTL, 0, 4),
+
+ /* CPSW_ALE_THREAD_VAL_REG */
+ [ALE_THREAD_ENABLE] = REG_FIELD(ALE_THREAD_VAL, 15, 15),
+ [ALE_THREAD_VALUE] = REG_FIELD(ALE_THREAD_VAL, 0, 5),
+};
+
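
Each REG_FIELD(reg, lsb, msb) entry describes one bitfield by register
offset and bit span; once allocated as a regmap_field, reads and writes
are masked and shifted automatically, which is what lets the driver drop
the hand-rolled major_ver_mask handling. A short usage sketch under those
assumptions (demo helper, not part of the patch):

	/* POL_PRI_VAL is REG_FIELD(ALE_POLICER_PORT_OUI, 16, 18), so the
	 * write below read-modify-writes bits 18:16 of that register only.
	 */
	static int demo_match_pri(struct cpsw_ale *ale, u32 pri)
	{
		int ret;

		ret = regmap_field_write(ale->fields[POL_PRI_VAL], pri);
		if (ret)
			return ret;

		return regmap_field_write(ale->fields[POL_PRI_MEN], 1);
	}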
static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
{
/* am3/4/5, dra7. dm814x, 66ak2hk-gbe */
.dev_id = "cpsw",
.tbl_entries = 1024,
- .major_ver_mask = 0xff,
+ .reg_fields = ale_fields_cpsw,
+ .num_fields = ARRAY_SIZE(ale_fields_cpsw),
.vlan_entry_tbl = vlan_entry_cpsw,
},
{
/* 66ak2h_xgbe */
.dev_id = "66ak2h-xgbe",
.tbl_entries = 2048,
- .major_ver_mask = 0xff,
+ .reg_fields = ale_fields_cpsw,
+ .num_fields = ARRAY_SIZE(ale_fields_cpsw),
.vlan_entry_tbl = vlan_entry_cpsw,
},
{
.dev_id = "66ak2el",
.features = CPSW_ALE_F_STATUS_REG,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
+ .num_fields = ARRAY_SIZE(ale_fields_cpsw_nu),
.nu_switch_ale = true,
.vlan_entry_tbl = vlan_entry_nu,
},
@@ -1318,7 +1425,8 @@ static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
.dev_id = "66ak2g",
.features = CPSW_ALE_F_STATUS_REG,
.tbl_entries = 64,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
+ .num_fields = ARRAY_SIZE(ale_fields_cpsw_nu),
.nu_switch_ale = true,
.vlan_entry_tbl = vlan_entry_nu,
},
@@ -1326,20 +1434,23 @@ static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
.dev_id = "am65x-cpsw2g",
.features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
.tbl_entries = 64,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
+ .num_fields = ARRAY_SIZE(ale_fields_cpsw_nu),
.nu_switch_ale = true,
.vlan_entry_tbl = vlan_entry_nu,
},
{
.dev_id = "j721e-cpswxg",
.features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
+ .num_fields = ARRAY_SIZE(ale_fields_cpsw_nu),
.vlan_entry_tbl = vlan_entry_k3_cpswxg,
},
{
.dev_id = "am64-cpswxg",
.features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
+ .num_fields = ARRAY_SIZE(ale_fields_cpsw_nu),
.vlan_entry_tbl = vlan_entry_k3_cpswxg,
.tbl_entries = 512,
},
@@ -1361,47 +1472,81 @@ cpsw_ale_dev_id *cpsw_ale_match_id(const struct cpsw_ale_dev_id *id,
return NULL;
}
+static const struct regmap_config ale_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .name = "cpsw-ale",
+};
+
+static int cpsw_ale_regfield_init(struct cpsw_ale *ale)
+{
+ const struct reg_field *reg_fields = ale->params.reg_fields;
+ struct device *dev = ale->params.dev;
+ struct regmap *regmap = ale->regmap;
+ int i;
+
+ for (i = 0; i < ale->params.num_fields; i++) {
+ ale->fields[i] = devm_regmap_field_alloc(dev, regmap,
+ reg_fields[i]);
+ if (IS_ERR(ale->fields[i])) {
+ dev_err(dev, "Unable to allocate regmap field %d\n", i);
+ return PTR_ERR(ale->fields[i]);
+ }
+ }
+
+ return 0;
+}
+
struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
{
+ u32 ale_entries, rev_major, rev_minor, policers;
const struct cpsw_ale_dev_id *ale_dev_id;
struct cpsw_ale *ale;
- u32 rev, ale_entries;
+ int ret;
ale_dev_id = cpsw_ale_match_id(cpsw_ale_id_match, params->dev_id);
if (!ale_dev_id)
return ERR_PTR(-EINVAL);
params->ale_entries = ale_dev_id->tbl_entries;
- params->major_ver_mask = ale_dev_id->major_ver_mask;
params->nu_switch_ale = ale_dev_id->nu_switch_ale;
+ params->reg_fields = ale_dev_id->reg_fields;
+ params->num_fields = ale_dev_id->num_fields;
ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL);
if (!ale)
return ERR_PTR(-ENOMEM);
+ ale->regmap = devm_regmap_init_mmio(params->dev, params->ale_regs,
+ &ale_regmap_cfg);
+ if (IS_ERR(ale->regmap)) {
+ dev_err(params->dev, "Couldn't create CPSW ALE regmap\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ale->params = *params;
+ ret = cpsw_ale_regfield_init(ale);
+ if (ret)
+ return ERR_PTR(ret);
ale->p0_untag_vid_mask = devm_bitmap_zalloc(params->dev, VLAN_N_VID,
GFP_KERNEL);
if (!ale->p0_untag_vid_mask)
return ERR_PTR(-ENOMEM);
- ale->params = *params;
ale->ageout = ale->params.ale_ageout * HZ;
ale->features = ale_dev_id->features;
ale->vlan_entry_tbl = ale_dev_id->vlan_entry_tbl;
- rev = readl_relaxed(ale->params.ale_regs + ALE_IDVER);
- ale->version =
- (ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask) << 8) |
- ALE_VERSION_MINOR(rev);
+ regmap_field_read(ale->fields[MINOR_VER], &rev_minor);
+ regmap_field_read(ale->fields[MAJOR_VER], &rev_major);
+ ale->version = rev_major << 8 | rev_minor;
dev_info(ale->params.dev, "initialized cpsw ale version %d.%d\n",
- ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask),
- ALE_VERSION_MINOR(rev));
+ rev_major, rev_minor);
if (ale->features & CPSW_ALE_F_STATUS_REG &&
!ale->params.ale_entries) {
- ale_entries =
- readl_relaxed(ale->params.ale_regs + ALE_STATUS) &
- ALE_STATUS_SIZE_MASK;
+ regmap_field_read(ale->fields[ALE_ENTRIES], &ale_entries);
/* ALE available on newer NetCP switches has introduced
* a register, ALE_STATUS, to indicate the size of ALE
* table which shows the size as a multiple of 1024 entries.
@@ -1415,8 +1560,20 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
ale_entries *= ALE_TABLE_SIZE_MULTIPLIER;
ale->params.ale_entries = ale_entries;
}
+
+ if (ale->features & CPSW_ALE_F_STATUS_REG &&
+ !ale->params.num_policers) {
+ regmap_field_read(ale->fields[ALE_POLICERS], &policers);
+ if (!policers)
+ return ERR_PTR(-EINVAL);
+
+ policers *= ALE_POLICER_SIZE_MULTIPLIER;
+ ale->params.num_policers = policers;
+ }
+
dev_info(ale->params.dev,
- "ALE Table size %ld\n", ale->params.ale_entries);
+ "ALE Table size %ld, Policers %ld\n", ale->params.ale_entries,
+ ale->params.num_policers);
/* set default bits for existing h/w */
ale->port_mask_bits = ale->params.ale_ports;
@@ -1480,3 +1637,97 @@ u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale)
{
return ale ? ale->params.ale_entries : 0;
}
+
+/* Reads the specified policer index into ALE POLICER registers */
+static void cpsw_ale_policer_read_idx(struct cpsw_ale *ale, u32 idx)
+{
+ idx &= ALE_POLICER_TBL_INDEX_MASK;
+ writel_relaxed(idx, ale->params.ale_regs + ALE_POLICER_TBL_CTL);
+}
+
+/* Writes the ALE POLICER registers into the specified policer index */
+static void cpsw_ale_policer_write_idx(struct cpsw_ale *ale, u32 idx)
+{
+ idx &= ALE_POLICER_TBL_INDEX_MASK;
+ idx |= ALE_POLICER_TBL_WRITE_ENABLE;
+ writel_relaxed(idx, ale->params.ale_regs + ALE_POLICER_TBL_CTL);
+}
+
+/* enables/disables the custom thread value for the specified policer index */
+static void cpsw_ale_policer_thread_idx_enable(struct cpsw_ale *ale, u32 idx,
+ u32 thread_id, bool enable)
+{
+ regmap_field_write(ale->fields[ALE_THREAD_CLASS_INDEX], idx);
+ regmap_field_write(ale->fields[ALE_THREAD_VALUE], thread_id);
+ regmap_field_write(ale->fields[ALE_THREAD_ENABLE], enable ? 1 : 0);
+}
+
+/* Disable all policer entries and thread mappings */
+static void cpsw_ale_policer_reset(struct cpsw_ale *ale)
+{
+ int i;
+
+	for (i = 0; i < ale->params.num_policers; i++) {
+ cpsw_ale_policer_read_idx(ale, i);
+ regmap_field_write(ale->fields[POL_PORT_MEN], 0);
+ regmap_field_write(ale->fields[POL_PRI_MEN], 0);
+ regmap_field_write(ale->fields[POL_OUI_MEN], 0);
+ regmap_field_write(ale->fields[POL_DST_MEN], 0);
+ regmap_field_write(ale->fields[POL_SRC_MEN], 0);
+ regmap_field_write(ale->fields[POL_OVLAN_MEN], 0);
+ regmap_field_write(ale->fields[POL_IVLAN_MEN], 0);
+ regmap_field_write(ale->fields[POL_ETHERTYPE_MEN], 0);
+ regmap_field_write(ale->fields[POL_IPSRC_MEN], 0);
+ regmap_field_write(ale->fields[POL_IPDST_MEN], 0);
+ regmap_field_write(ale->fields[POL_EN], 0);
+ regmap_field_write(ale->fields[POL_RED_DROP_EN], 0);
+ regmap_field_write(ale->fields[POL_YELLOW_DROP_EN], 0);
+ regmap_field_write(ale->fields[POL_PRIORITY_THREAD_EN], 0);
+
+ cpsw_ale_policer_thread_idx_enable(ale, i, 0, 0);
+ }
+}
+
+/* Default classifier is to map 8 user priorities to N receive channels */
+void cpsw_ale_classifier_setup_default(struct cpsw_ale *ale, int num_rx_ch)
+{
+ int pri, idx;
+ /* IEEE802.1D-2004, Standard for Local and metropolitan area networks
+ * Table G-2 - Traffic type acronyms
+ * Table G-3 - Defining traffic types
+	 * User priority values 1 and 2 effectively communicate a lower
+	 * priority than 0. In the table below, 0 is therefore assigned to a
+	 * higher-priority thread than 1 and 2 wherever possible.
+	 * The table maps each user priority to the thread it should be sent
+	 * to for a given number of threads (RX channels); upper threads have
+	 * higher priority.
+	 * e.g. with 8 threads, user priority 0 maps to pri_thread_map[8-1][0],
+	 * i.e. thread 2.
+ */
+ int pri_thread_map[8][8] = { { 0, 0, 0, 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 1, 1, 1, 1, },
+ { 0, 0, 0, 0, 1, 1, 2, 2, },
+ { 1, 0, 0, 1, 2, 2, 3, 3, },
+ { 1, 0, 0, 1, 2, 3, 4, 4, },
+ { 1, 0, 0, 2, 3, 4, 5, 5, },
+ { 1, 0, 0, 2, 3, 4, 5, 6, },
+ { 2, 0, 1, 3, 4, 5, 6, 7, } };
+
+ cpsw_ale_policer_reset(ale);
+
+ /* use first 8 classifiers to map 8 (DSCP/PCP) priorities to channels */
+ for (pri = 0; pri < 8; pri++) {
+ idx = pri;
+
+ /* Classifier 'idx' match on priority 'pri' */
+ cpsw_ale_policer_read_idx(ale, idx);
+ regmap_field_write(ale->fields[POL_PRI_VAL], pri);
+ regmap_field_write(ale->fields[POL_PRI_MEN], 1);
+ cpsw_ale_policer_write_idx(ale, idx);
+
+ /* Map Classifier 'idx' to thread provided by the map */
+ cpsw_ale_policer_thread_idx_enable(ale, idx,
+ pri_thread_map[num_rx_ch - 1][pri],
+ 1);
+ }
+}
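
A worked example of the mapping: with num_rx_ch = 3 the loop uses row
pri_thread_map[2] = { 0, 0, 0, 0, 1, 1, 2, 2 }, so priorities 0-3 land on
thread 0, 4-5 on thread 1, and 6-7 on the highest-priority thread 2. The
lookup each iteration performs, as a stand-alone sketch:

	/* Demo helper (hypothetical); same table as
	 * cpsw_ale_classifier_setup_default(), other rows elided.
	 */
	static int demo_thread_for_pri(int num_rx_ch, int pri)
	{
		static const int pri_thread_map[8][8] = {
			[2] = { 0, 0, 0, 0, 1, 1, 2, 2 },
		};

		return pri_thread_map[num_rx_ch - 1][pri];
	}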
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 6779ee111d57..87b7d1b3a34a 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -8,11 +8,14 @@
#ifndef __TI_CPSW_ALE_H__
#define __TI_CPSW_ALE_H__
+struct reg_field;
+
struct cpsw_ale_params {
struct device *dev;
void __iomem *ale_regs;
unsigned long ale_ageout; /* in secs */
unsigned long ale_entries;
+ unsigned long num_policers;
unsigned long ale_ports;
/* NU Switch has specific handling as number of bits in ALE entries
* are different than other versions of ALE. Also there are specific
@@ -20,19 +23,70 @@ struct cpsw_ale_params {
* to identify this hardware.
*/
bool nu_switch_ale;
- /* mask bit used in NU Switch ALE is 3 bits instead of 8 bits. So
- * pass it from caller.
- */
- u32 major_ver_mask;
+ const struct reg_field *reg_fields;
+ int num_fields;
const char *dev_id;
unsigned long bus_freq;
};
struct ale_entry_fld;
+struct regmap;
+
+enum ale_fields {
+ MINOR_VER,
+ MAJOR_VER,
+ ALE_ENTRIES,
+ ALE_POLICERS,
+ POL_PORT_MEN,
+ POL_TRUNK_ID,
+ POL_PORT_NUM,
+ POL_PRI_MEN,
+ POL_PRI_VAL,
+ POL_OUI_MEN,
+ POL_OUI_INDEX,
+ POL_DST_MEN,
+ POL_DST_INDEX,
+ POL_SRC_MEN,
+ POL_SRC_INDEX,
+ POL_OVLAN_MEN,
+ POL_OVLAN_INDEX,
+ POL_IVLAN_MEN,
+ POL_IVLAN_INDEX,
+ POL_ETHERTYPE_MEN,
+ POL_ETHERTYPE_INDEX,
+ POL_IPSRC_MEN,
+ POL_IPSRC_INDEX,
+ POL_IPDST_MEN,
+ POL_IPDST_INDEX,
+ POL_EN,
+ POL_RED_DROP_EN,
+ POL_YELLOW_DROP_EN,
+ POL_YELLOW_THRESH,
+ POL_POL_MATCH_MODE,
+ POL_PRIORITY_THREAD_EN,
+ POL_MAC_ONLY_DEF_DIS,
+ POL_TEST_CLR,
+ POL_TEST_CLR_RED,
+ POL_TEST_CLR_YELLOW,
+ POL_TEST_CLR_SELECTED,
+ POL_TEST_ENTRY,
+ POL_STATUS_HIT,
+ POL_STATUS_HIT_RED,
+ POL_STATUS_HIT_YELLOW,
+ ALE_DEFAULT_THREAD_EN,
+ ALE_DEFAULT_THREAD_VAL,
+ ALE_THREAD_CLASS_INDEX,
+ ALE_THREAD_ENABLE,
+ ALE_THREAD_VALUE,
+ /* terminator */
+ ALE_FIELDS_MAX,
+};
struct cpsw_ale {
struct cpsw_ale_params params;
struct timer_list timer;
+ struct regmap *regmap;
+ struct regmap_field *fields[ALE_FIELDS_MAX];
unsigned long ageout;
u32 version;
u32 features;
@@ -140,5 +194,6 @@ int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask);
void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
bool add);
+void cpsw_ale_classifier_setup_default(struct cpsw_ale *ale, int num_rx_ch);
#endif
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index 53ed23d68722..21d55a180ef6 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -725,8 +725,6 @@ int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *inf
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = cpsw->cpts->phc_index;
info->tx_types =
@@ -741,10 +739,7 @@ int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *inf
int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = 0;
info->rx_filters = 0;
return 0;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 2baa198ebfa0..557cc71b9dd2 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1407,7 +1407,8 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
cpsw->slaves[i].ndev = ndev;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_TC;
+ ndev->netns_local = true;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
NETDEV_XDP_ACT_REDIRECT |
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index 75c294ce6fb6..5d6d1cf78e93 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -53,78 +53,6 @@
#define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n) BIT(LATCH_INDEX(n))
#define IEP_CAP_CFG_CAP_ASYNC_EN(n) BIT(LATCH_INDEX(n) + 10)
-enum {
- ICSS_IEP_GLOBAL_CFG_REG,
- ICSS_IEP_GLOBAL_STATUS_REG,
- ICSS_IEP_COMPEN_REG,
- ICSS_IEP_SLOW_COMPEN_REG,
- ICSS_IEP_COUNT_REG0,
- ICSS_IEP_COUNT_REG1,
- ICSS_IEP_CAPTURE_CFG_REG,
- ICSS_IEP_CAPTURE_STAT_REG,
-
- ICSS_IEP_CAP6_RISE_REG0,
- ICSS_IEP_CAP6_RISE_REG1,
-
- ICSS_IEP_CAP7_RISE_REG0,
- ICSS_IEP_CAP7_RISE_REG1,
-
- ICSS_IEP_CMP_CFG_REG,
- ICSS_IEP_CMP_STAT_REG,
- ICSS_IEP_CMP0_REG0,
- ICSS_IEP_CMP0_REG1,
- ICSS_IEP_CMP1_REG0,
- ICSS_IEP_CMP1_REG1,
-
- ICSS_IEP_CMP8_REG0,
- ICSS_IEP_CMP8_REG1,
- ICSS_IEP_SYNC_CTRL_REG,
- ICSS_IEP_SYNC0_STAT_REG,
- ICSS_IEP_SYNC1_STAT_REG,
- ICSS_IEP_SYNC_PWIDTH_REG,
- ICSS_IEP_SYNC0_PERIOD_REG,
- ICSS_IEP_SYNC1_DELAY_REG,
- ICSS_IEP_SYNC_START_REG,
- ICSS_IEP_MAX_REGS,
-};
-
-/**
- * struct icss_iep_plat_data - Plat data to handle SoC variants
- * @config: Regmap configuration data
- * @reg_offs: register offsets to capture offset differences across SoCs
- * @flags: Flags to represent IEP properties
- */
-struct icss_iep_plat_data {
- const struct regmap_config *config;
- u32 reg_offs[ICSS_IEP_MAX_REGS];
- u32 flags;
-};
-
-struct icss_iep {
- struct device *dev;
- void __iomem *base;
- const struct icss_iep_plat_data *plat_data;
- struct regmap *map;
- struct device_node *client_np;
- unsigned long refclk_freq;
- int clk_tick_time; /* one refclk tick time in ns */
- struct ptp_clock_info ptp_info;
- struct ptp_clock *ptp_clock;
- struct mutex ptp_clk_mutex; /* PHC access serializer */
- u32 def_inc;
- s16 slow_cmp_inc;
- u32 slow_cmp_count;
- const struct icss_iep_clockops *ops;
- void *clockops_data;
- u32 cycle_time_ns;
- u32 perout_enabled;
- bool pps_enabled;
- int cap_cmp_irq;
- u64 period;
- u32 latch_enable;
- struct work_struct work;
-};
-
/**
* icss_iep_get_count_hi() - Get the upper 32 bit IEP counter
* @iep: Pointer to structure representing IEP.
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.h b/drivers/net/ethernet/ti/icssg/icss_iep.h
index 803a4b714893..0bdca0155abd 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.h
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.h
@@ -12,7 +12,78 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/regmap.h>
-struct icss_iep;
+enum {
+ ICSS_IEP_GLOBAL_CFG_REG,
+ ICSS_IEP_GLOBAL_STATUS_REG,
+ ICSS_IEP_COMPEN_REG,
+ ICSS_IEP_SLOW_COMPEN_REG,
+ ICSS_IEP_COUNT_REG0,
+ ICSS_IEP_COUNT_REG1,
+ ICSS_IEP_CAPTURE_CFG_REG,
+ ICSS_IEP_CAPTURE_STAT_REG,
+
+ ICSS_IEP_CAP6_RISE_REG0,
+ ICSS_IEP_CAP6_RISE_REG1,
+
+ ICSS_IEP_CAP7_RISE_REG0,
+ ICSS_IEP_CAP7_RISE_REG1,
+
+ ICSS_IEP_CMP_CFG_REG,
+ ICSS_IEP_CMP_STAT_REG,
+ ICSS_IEP_CMP0_REG0,
+ ICSS_IEP_CMP0_REG1,
+ ICSS_IEP_CMP1_REG0,
+ ICSS_IEP_CMP1_REG1,
+
+ ICSS_IEP_CMP8_REG0,
+ ICSS_IEP_CMP8_REG1,
+ ICSS_IEP_SYNC_CTRL_REG,
+ ICSS_IEP_SYNC0_STAT_REG,
+ ICSS_IEP_SYNC1_STAT_REG,
+ ICSS_IEP_SYNC_PWIDTH_REG,
+ ICSS_IEP_SYNC0_PERIOD_REG,
+ ICSS_IEP_SYNC1_DELAY_REG,
+ ICSS_IEP_SYNC_START_REG,
+ ICSS_IEP_MAX_REGS,
+};
+
+/**
+ * struct icss_iep_plat_data - Plat data to handle SoC variants
+ * @config: Regmap configuration data
+ * @reg_offs: register offsets to capture offset differences across SoCs
+ * @flags: Flags to represent IEP properties
+ */
+struct icss_iep_plat_data {
+ const struct regmap_config *config;
+ u32 reg_offs[ICSS_IEP_MAX_REGS];
+ u32 flags;
+};
+
+struct icss_iep {
+ struct device *dev;
+ void __iomem *base;
+ const struct icss_iep_plat_data *plat_data;
+ struct regmap *map;
+ struct device_node *client_np;
+ unsigned long refclk_freq;
+ int clk_tick_time; /* one refclk tick time in ns */
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ struct mutex ptp_clk_mutex; /* PHC access serializer */
+ u32 def_inc;
+ s16 slow_cmp_inc;
+ u32 slow_cmp_count;
+ const struct icss_iep_clockops *ops;
+ void *clockops_data;
+ u32 cycle_time_ns;
+ u32 perout_enabled;
+ bool pps_enabled;
+ int cap_cmp_irq;
+ u64 period;
+ u32 latch_enable;
+ struct work_struct work;
+};
+
extern const struct icss_iep_clockops prueth_iep_clockops;
/* Firmware specific clock operations */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_classifier.c b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
index 9ec504d976d6..833ca86d0b71 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_classifier.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
@@ -290,6 +290,7 @@ void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac)
mac[2] << 16 | mac[3] << 24));
regmap_write(miig_rt, MAC_INTERFACE_1, (u32)(mac[4] | mac[5] << 8));
}
+EXPORT_SYMBOL_GPL(icssg_class_set_host_mac_addr);
void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac)
{
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index b9d8a93d1680..fdebeb2f84e0 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -660,14 +660,15 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev
{
struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
struct netdev_queue *netif_txq;
struct prueth_tx_chn *tx_chn;
dma_addr_t desc_dma, buf_dma;
+ u32 pkt_len, dst_tag_id;
int i, ret = 0, q_idx;
bool in_tx_ts = 0;
int tx_ts_cookie;
void **swdata;
- u32 pkt_len;
u32 *epib;
pkt_len = skb_headlen(skb);
@@ -712,9 +713,20 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev
/* set dst tag to indicate internal qid at the firmware which is at
* bit8..bit15. bit0..bit7 indicates port num for directed
- * packets in case of switch mode operation
+ * packets in case of switch mode operation and port num 0
+ * for undirected packets in case of HSR offload mode
*/
- cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
+ dst_tag_id = emac->port_id | (q_idx << 8);
+
+ if (prueth->is_hsr_offload_mode &&
+ (ndev->features & NETIF_F_HW_HSR_DUP))
+ dst_tag_id = PRUETH_UNDIRECTED_PKT_DST_TAG;
+
+ if (prueth->is_hsr_offload_mode &&
+ (ndev->features & NETIF_F_HW_HSR_TAG_INS))
+ epib[1] |= PRUETH_UNDIRECTED_PKT_TAG_INS;
+
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, dst_tag_id);
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index dae52a83a378..5d2491c2943a 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -107,7 +107,7 @@ static const struct map hwq_map[2][ICSSG_NUM_OTHER_QUEUES] = {
},
};
-static void icssg_config_mii_init_switch(struct prueth_emac *emac)
+static void icssg_config_mii_init_fw_offload(struct prueth_emac *emac)
{
struct prueth *prueth = emac->prueth;
int mii = prueth_emac_slice(emac);
@@ -278,7 +278,7 @@ static int emac_r30_is_done(struct prueth_emac *emac)
return 1;
}
-static int prueth_switch_buffer_setup(struct prueth_emac *emac)
+static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac)
{
struct icssg_buffer_pool_cfg __iomem *bpool_cfg;
struct icssg_rxq_ctx __iomem *rxq_ctx;
@@ -424,7 +424,7 @@ static void icssg_init_emac_mode(struct prueth *prueth)
icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
}
-static void icssg_init_switch_mode(struct prueth *prueth)
+static void icssg_init_fw_offload_mode(struct prueth *prueth)
{
u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
int i;
@@ -455,8 +455,8 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
struct icssg_flow_cfg __iomem *flow_cfg;
int ret;
- if (prueth->is_switch_mode)
- icssg_init_switch_mode(prueth);
+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
+ icssg_init_fw_offload_mode(prueth);
else
icssg_init_emac_mode(prueth);
@@ -472,8 +472,8 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
regmap_update_bits(prueth->miig_rt, ICSSG_CFG_OFFSET,
ICSSG_CFG_DEFAULT, ICSSG_CFG_DEFAULT);
icssg_miig_set_interface_mode(prueth->miig_rt, slice, emac->phy_if);
- if (prueth->is_switch_mode)
- icssg_config_mii_init_switch(emac);
+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
+ icssg_config_mii_init_fw_offload(emac);
else
icssg_config_mii_init(emac);
icssg_config_ipg(emac);
@@ -498,8 +498,8 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
writeb(0, config + SPL_PKT_DEFAULT_PRIORITY);
writeb(0, config + QUEUE_NUM_UNTAGGED);
- if (prueth->is_switch_mode)
- ret = prueth_switch_buffer_setup(emac);
+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
+ ret = prueth_fw_offload_buffer_setup(emac);
else
ret = prueth_emac_buffer_setup(emac);
if (ret)
@@ -531,7 +531,9 @@ static const struct icssg_r30_cmd emac_r32_bitmask[] = {
{{EMAC_NONE, 0xffff4000, EMAC_NONE, EMAC_NONE}}, /* Preemption on Tx ENABLE*/
{{EMAC_NONE, 0xbfff0000, EMAC_NONE, EMAC_NONE}}, /* Preemption on Tx DISABLE*/
{{0xffff0010, EMAC_NONE, 0xffff0010, EMAC_NONE}}, /* VLAN AWARE*/
- {{0xffef0000, EMAC_NONE, 0xffef0000, EMAC_NONE}} /* VLAN UNWARE*/
+	{{0xffef0000, EMAC_NONE, 0xffef0000, EMAC_NONE}},	/* VLAN UNAWARE*/
+ {{0xffff2000, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* HSR_RX_OFFLOAD_ENABLE */
+ {{0xdfff0000, EMAC_NONE, EMAC_NONE, EMAC_NONE}} /* HSR_RX_OFFLOAD_DISABLE */
};
int icssg_set_port_state(struct prueth_emac *emac,
@@ -733,6 +735,7 @@ void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
u8 fid_c1;
tbl = prueth->vlan_tbl;
+ spin_lock(&prueth->vtbl_lock);
fid_c1 = tbl[vid].fid_c1;
/* FID_C1: bit0..2 port membership mask,
@@ -748,6 +751,7 @@ void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
}
tbl[vid].fid_c1 = fid_c1;
+ spin_unlock(&prueth->vtbl_lock);
}
EXPORT_SYMBOL_GPL(icssg_vtbl_modify);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h
index 1ac60283923b..92c2deaa3068 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.h
@@ -80,6 +80,8 @@ enum icssg_port_state_cmd {
ICSSG_EMAC_PORT_PREMPT_TX_DISABLE,
ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE,
ICSSG_EMAC_PORT_VLAN_AWARE_DISABLE,
+ ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE,
+ ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE,
ICSSG_EMAC_PORT_MAX_COMMANDS
};
diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
index 5688f054cec5..b715af21d23a 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
@@ -68,9 +68,13 @@ static int emac_nway_reset(struct net_device *ndev)
static int emac_get_sset_count(struct net_device *ndev, int stringset)
{
+ struct prueth_emac *emac = netdev_priv(ndev);
switch (stringset) {
case ETH_SS_STATS:
- return ICSSG_NUM_ETHTOOL_STATS;
+ if (emac->prueth->pa_stats)
+ return ICSSG_NUM_ETHTOOL_STATS;
+ else
+ return ICSSG_NUM_ETHTOOL_STATS - ICSSG_NUM_PA_STATS;
default:
return -EOPNOTSUPP;
}
@@ -78,18 +82,18 @@ static int emac_get_sset_count(struct net_device *ndev, int stringset)
static void emac_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
+ struct prueth_emac *emac = netdev_priv(ndev);
u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) {
- if (!icssg_all_stats[i].standard_stats) {
- memcpy(p, icssg_all_stats[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++)
+ if (!icssg_all_miig_stats[i].standard_stats)
+ ethtool_puts(&p, icssg_all_miig_stats[i].name);
+ if (emac->prueth->pa_stats)
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++)
+ ethtool_puts(&p, icssg_all_pa_stats[i].name);
break;
default:
break;
@@ -104,9 +108,13 @@ static void emac_get_ethtool_stats(struct net_device *ndev,
emac_update_hardware_stats(emac);
- for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++)
- if (!icssg_all_stats[i].standard_stats)
+ for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++)
+ if (!icssg_all_miig_stats[i].standard_stats)
*(data++) = emac->stats[i];
+
+ if (emac->prueth->pa_stats)
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++)
+ *(data++) = emac->pa_stats[i];
}
static int emac_get_ts_info(struct net_device *ndev,
@@ -118,8 +126,6 @@ static int emac_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = icss_iep_get_ptp_clock_idx(emac->iep);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 3e51b3a9b0a5..5c20ceb164df 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -13,6 +13,7 @@
#include <linux/dma/ti-cppi5.h>
#include <linux/etherdevice.h>
#include <linux/genalloc.h>
+#include <linux/if_hsr.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -40,6 +41,11 @@
#define DEFAULT_PORT_MASK 1
#define DEFAULT_UNTAG_MASK 1
+#define NETIF_PRUETH_HSR_OFFLOAD_FEATURES (NETIF_F_HW_HSR_FWD | \
+ NETIF_F_HW_HSR_DUP | \
+ NETIF_F_HW_HSR_TAG_INS | \
+ NETIF_F_HW_HSR_TAG_RM)
+
/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
@@ -118,6 +124,19 @@ static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static struct icssg_firmwares icssg_hsr_firmwares[] = {
+ {
+ .pru = "ti-pruss/am65x-sr2-pru0-pruhsr-fw.elf",
+ .rtu = "ti-pruss/am65x-sr2-rtu0-pruhsr-fw.elf",
+ .txpru = "ti-pruss/am65x-sr2-txpru0-pruhsr-fw.elf",
+ },
+ {
+ .pru = "ti-pruss/am65x-sr2-pru1-pruhsr-fw.elf",
+ .rtu = "ti-pruss/am65x-sr2-rtu1-pruhsr-fw.elf",
+ .txpru = "ti-pruss/am65x-sr2-txpru1-pruhsr-fw.elf",
+ }
+};
+
static struct icssg_firmwares icssg_switch_firmwares[] = {
{
.pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf",
@@ -152,6 +171,8 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
if (prueth->is_switch_mode)
firmwares = icssg_switch_firmwares;
+ else if (prueth->is_hsr_offload_mode)
+ firmwares = icssg_hsr_firmwares;
else
firmwares = icssg_emac_firmwares;
@@ -365,7 +386,8 @@ static void prueth_iep_settime(void *clockops_data, u64 ns)
sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
sc_desc.iepcount_set = ns % cycletime;
- sc_desc.CMP0_current = cycletime - 4; //Count from 0 to (cycle time)-4
+ /* Count from 0 to (cycle time) - emac->iep->def_inc */
+ sc_desc.CMP0_current = cycletime - emac->iep->def_inc;
memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));
@@ -470,6 +492,36 @@ static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
return 0;
}
+static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ icssg_fdb_add_del(emac, addr, prueth->default_vlan,
+ ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_BLOCK, true);
+
+ icssg_vtbl_modify(emac, emac->port_vlan, BIT(emac->port_id),
+ BIT(emac->port_id), true);
+ return 0;
+}
+
+static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ icssg_fdb_add_del(emac, addr, prueth->default_vlan,
+ ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_BLOCK, false);
+
+ return 0;
+}
+
/**
* emac_ndo_open - EMAC device open
* @ndev: network adapter device
@@ -630,7 +682,10 @@ static int emac_ndo_stop(struct net_device *ndev)
icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
- __dev_mc_unsync(ndev, icssg_prueth_del_mcast);
+ if (emac->prueth->is_hsr_offload_mode)
+ __dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
+ else
+ __dev_mc_unsync(ndev, icssg_prueth_del_mcast);
atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
/* ensure new tdown_cnt value is visible */
@@ -708,7 +763,12 @@ static void emac_ndo_set_rx_mode_work(struct work_struct *work)
return;
}
- __dev_mc_sync(ndev, icssg_prueth_add_mcast, icssg_prueth_del_mcast);
+ if (emac->prueth->is_hsr_offload_mode)
+ __dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
+ icssg_prueth_hsr_del_mcast);
+ else
+ __dev_mc_sync(ndev, icssg_prueth_add_mcast,
+ icssg_prueth_del_mcast);
}
/**
@@ -725,6 +785,29 @@ static void emac_ndo_set_rx_mode(struct net_device *ndev)
queue_work(emac->cmd_wq, &emac->rx_mode_work);
}
+static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+	/* HSR tag insertion offload and HSR duplication offload are tightly
+	 * coupled in the firmware implementation; both features must be
+	 * enabled or disabled together.
+	 */
+ if (!(ndev->features & (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS)))
+ if ((features & NETIF_F_HW_HSR_DUP) ||
+ (features & NETIF_F_HW_HSR_TAG_INS))
+ features |= NETIF_F_HW_HSR_DUP |
+ NETIF_F_HW_HSR_TAG_INS;
+
+ if ((ndev->features & NETIF_F_HW_HSR_DUP) ||
+ (ndev->features & NETIF_F_HW_HSR_TAG_INS))
+ if (!(features & NETIF_F_HW_HSR_DUP) ||
+ !(features & NETIF_F_HW_HSR_TAG_INS))
+ features &= ~(NETIF_F_HW_HSR_DUP |
+ NETIF_F_HW_HSR_TAG_INS);
+
+ return features;
+}
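[Editor's note: the callback keeps the two bits in lockstep in both directions. A sketch of the resulting transitions (illustrative only, not driver code):

	netdev_features_t cur, req;

	cur = 0;                                /* both currently off        */
	req = NETIF_F_HW_HSR_DUP;               /* user enables just one     */
	/* emac_ndo_fix_features(cur -> req) returns
	 * NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS: both turned on      */

	cur = NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS;  /* both on      */
	req = cur & ~NETIF_F_HW_HSR_TAG_INS;    /* user clears just one      */
	/* emac_ndo_fix_features(cur -> req) returns 0: both turned off     */

In practice this means "ethtool -K <iface> hsr-dup-offload on" (feature names as reported by ethtool -k) implicitly enables hsr-tag-ins-offload as well, and clearing either clears both.]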
+
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
@@ -736,6 +819,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_eth_ioctl = icssg_ndo_ioctl,
.ndo_get_stats64 = icssg_ndo_get_stats64,
.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
+ .ndo_fix_features = emac_ndo_fix_features,
};
static int prueth_netdev_init(struct prueth *prueth,
@@ -857,12 +941,14 @@ static int prueth_netdev_init(struct prueth *prueth,
}
ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+ ndev->dev.of_node = eth_node;
ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
ndev->max_mtu = PRUETH_MAX_MTU;
ndev->netdev_ops = &emac_netdev_ops;
ndev->ethtool_ops = &icssg_ethtool_ops;
ndev->hw_features = NETIF_F_SG;
ndev->features = ndev->hw_features;
+ ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
@@ -951,7 +1037,7 @@ static void prueth_emac_restart(struct prueth *prueth)
netif_device_attach(emac1->ndev);
}
-static void icssg_enable_switch_mode(struct prueth *prueth)
+static void icssg_change_mode(struct prueth *prueth)
{
struct prueth_emac *emac;
int mac;
@@ -960,6 +1046,13 @@ static void icssg_enable_switch_mode(struct prueth *prueth)
for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
emac = prueth->emac[mac];
+ if (prueth->is_hsr_offload_mode) {
+ if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
+ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
+ else
+ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
+ }
+
if (netif_running(emac->ndev)) {
icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
@@ -971,8 +1064,13 @@ static void icssg_enable_switch_mode(struct prueth *prueth)
BIT(emac->port_id) | DEFAULT_PORT_MASK,
BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
true);
+ if (prueth->is_hsr_offload_mode)
+ icssg_vtbl_modify(emac, DEFAULT_VID,
+ DEFAULT_PORT_MASK,
+ DEFAULT_UNTAG_MASK, true);
icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
- icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
+ if (prueth->is_switch_mode)
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
}
}
}
@@ -1010,7 +1108,7 @@ static int prueth_netdevice_port_link(struct net_device *ndev,
prueth->is_switch_mode = true;
prueth->default_vlan = 1;
emac->port_vlan = prueth->default_vlan;
- icssg_enable_switch_mode(prueth);
+ icssg_change_mode(prueth);
}
}
@@ -1038,6 +1136,61 @@ static void prueth_netdevice_port_unlink(struct net_device *ndev)
prueth->hw_bridge_dev = NULL;
}
+static int prueth_hsr_port_link(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ struct prueth_emac *emac0;
+ struct prueth_emac *emac1;
+
+ emac0 = prueth->emac[PRUETH_MAC0];
+ emac1 = prueth->emac[PRUETH_MAC1];
+
+ if (prueth->is_switch_mode)
+ return -EOPNOTSUPP;
+
+ prueth->hsr_members |= BIT(emac->port_id);
+ if (!prueth->is_hsr_offload_mode) {
+ if (prueth->hsr_members & BIT(PRUETH_PORT_MII0) &&
+ prueth->hsr_members & BIT(PRUETH_PORT_MII1)) {
+ if (!(emac0->ndev->features &
+ NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
+ !(emac1->ndev->features &
+ NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
+ return -EOPNOTSUPP;
+ prueth->is_hsr_offload_mode = true;
+ prueth->default_vlan = 1;
+ emac0->port_vlan = prueth->default_vlan;
+ emac1->port_vlan = prueth->default_vlan;
+ icssg_change_mode(prueth);
+ netdev_dbg(ndev, "Enabling HSR offload mode\n");
+ }
+ }
+
+ return 0;
+}
+
+static void prueth_hsr_port_unlink(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ struct prueth_emac *emac0;
+ struct prueth_emac *emac1;
+
+ emac0 = prueth->emac[PRUETH_MAC0];
+ emac1 = prueth->emac[PRUETH_MAC1];
+
+ prueth->hsr_members &= ~BIT(emac->port_id);
+ if (prueth->is_hsr_offload_mode) {
+ prueth->is_hsr_offload_mode = false;
+ emac0->port_vlan = 0;
+ emac1->port_vlan = 0;
+ prueth->hsr_dev = NULL;
+ prueth_emac_restart(prueth);
+ netdev_dbg(ndev, "Disabling HSR Offload mode\n");
+ }
+}
+
/* netdev notifier */
static int prueth_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
@@ -1045,6 +1198,8 @@ static int prueth_netdevice_event(struct notifier_block *unused,
struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info;
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
int ret = NOTIFY_DONE;
if (ndev->netdev_ops != &emac_netdev_ops)
@@ -1054,6 +1209,25 @@ static int prueth_netdevice_event(struct notifier_block *unused,
case NETDEV_CHANGEUPPER:
info = ptr;
+ if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
+ is_hsr_master(info->upper_dev)) {
+ if (info->linking) {
+ if (!prueth->hsr_dev) {
+ prueth->hsr_dev = info->upper_dev;
+ icssg_class_set_host_mac_addr(prueth->miig_rt,
+ prueth->hsr_dev->dev_addr);
+ } else {
+ if (prueth->hsr_dev != info->upper_dev) {
+ netdev_dbg(ndev, "Both interfaces must be linked to same upper device\n");
+ return -EOPNOTSUPP;
+ }
+ }
+ prueth_hsr_port_link(ndev);
+ } else {
+ prueth_hsr_port_unlink(ndev);
+ }
+ }
+
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
@@ -1181,6 +1355,12 @@ static int prueth_probe(struct platform_device *pdev)
return -ENODEV;
}
+ prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
+ if (IS_ERR(prueth->pa_stats)) {
+ dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
+ prueth->pa_stats = NULL;
+ }
+
if (eth0_node) {
ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
if (ret)
@@ -1262,6 +1442,7 @@ static int prueth_probe(struct platform_device *pdev)
icss_iep_init_fw(prueth->iep1);
}
+ spin_lock_init(&prueth->vtbl_lock);
/* setup netdev interfaces */
if (eth0_node) {
ret = prueth_netdev_init(prueth, eth0_node);
@@ -1271,8 +1452,8 @@ static int prueth_probe(struct platform_device *pdev)
goto exit_iep;
}
- if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL))
- prueth->emac[PRUETH_MAC0]->half_duplex = 1;
+ prueth->emac[PRUETH_MAC0]->half_duplex =
+ of_property_read_bool(eth0_node, "ti,half-duplex-capable");
prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
}
@@ -1285,8 +1466,8 @@ static int prueth_probe(struct platform_device *pdev)
goto netdev_exit;
}
- if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL))
- prueth->emac[PRUETH_MAC1]->half_duplex = 1;
+ prueth->emac[PRUETH_MAC1]->half_duplex =
+ of_property_read_bool(eth1_node, "ti,half-duplex-capable");
prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
}
@@ -1452,6 +1633,7 @@ static const struct prueth_pdata am654_icssg_pdata = {
static const struct prueth_pdata am64x_icssg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
+ .quirk_10m_link_issue = 1,
.switch_mode = 1,
};
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index f678d656a3ed..8722bb4a268a 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -50,13 +50,18 @@
#define ICSSG_MAX_RFLOWS 8 /* per slice */
+#define ICSSG_NUM_PA_STATS 4
+#define ICSSG_NUM_MIIG_STATS 60
/* Number of ICSSG related stats */
-#define ICSSG_NUM_STATS 60
+#define ICSSG_NUM_STATS (ICSSG_NUM_MIIG_STATS + ICSSG_NUM_PA_STATS)
#define ICSSG_NUM_STANDARD_STATS 31
#define ICSSG_NUM_ETHTOOL_STATS (ICSSG_NUM_STATS - ICSSG_NUM_STANDARD_STATS)
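[Editor's note: for reference, the counts that fall out of the split above, using the defines as given:

	/* ICSSG_NUM_STATS         = 60 + 4  = 64
	 * ICSSG_NUM_ETHTOOL_STATS = 64 - 31 = 33
	 */
]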
#define IEP_DEFAULT_CYCLE_TIME_NS 1000000 /* 1 ms */
+#define PRUETH_UNDIRECTED_PKT_DST_TAG 0
+#define PRUETH_UNDIRECTED_PKT_TAG_INS BIT(30)
+
/* Firmware status codes */
#define ICSS_HS_FW_READY 0x55555555
#define ICSS_HS_FW_DEAD 0xDEAD0000 /* lower 16 bits contain error code */
@@ -190,7 +195,8 @@ struct prueth_emac {
int port_vlan;
struct delayed_work stats_work;
- u64 stats[ICSSG_NUM_STATS];
+ u64 stats[ICSSG_NUM_MIIG_STATS];
+ u64 pa_stats[ICSSG_NUM_PA_STATS];
/* RX IRQ Coalescing Related */
struct hrtimer rx_hrtimer;
@@ -230,6 +236,7 @@ struct icssg_firmwares {
* @registered_netdevs: list of registered netdevs
* @miig_rt: regmap to mii_g_rt block
* @mii_rt: regmap to mii_rt block
+ * @pa_stats: regmap to pa_stats block
* @pru_id: ID for each of the PRUs
* @pdev: pointer to ICSSG platform device
* @pdata: pointer to platform data for ICSSG driver
@@ -239,11 +246,14 @@ struct icssg_firmwares {
* @iep1: pointer to IEP1 device
* @vlan_tbl: VLAN-FID table pointer
* @hw_bridge_dev: pointer to HW bridge net device
+ * @hsr_dev: pointer to the HSR net device
* @br_members: bitmask of bridge member ports
+ * @hsr_members: bitmask of HSR member ports
* @prueth_netdevice_nb: netdevice notifier block
* @prueth_switchdev_nb: switchdev notifier block
* @prueth_switchdev_bl_nb: switchdev blocking notifier block
* @is_switch_mode: flag to indicate if device is in Switch mode
+ * @is_hsr_offload_mode: flag to indicate if the device is in HSR offload mode
* @is_switchmode_supported: indicates platform support for switch mode
* @switch_id: ID for mapping switch ports to bridge
* @default_vlan: Default VLAN for host
@@ -263,6 +273,7 @@ struct prueth {
struct net_device *registered_netdevs[PRUETH_NUM_MACS];
struct regmap *miig_rt;
struct regmap *mii_rt;
+ struct regmap *pa_stats;
enum pruss_pru_id pru_id[PRUSS_NUM_PRUS];
struct platform_device *pdev;
@@ -274,14 +285,19 @@ struct prueth {
struct prueth_vlan_tbl *vlan_tbl;
struct net_device *hw_bridge_dev;
+ struct net_device *hsr_dev;
u8 br_members;
+ u8 hsr_members;
struct notifier_block prueth_netdevice_nb;
struct notifier_block prueth_switchdev_nb;
struct notifier_block prueth_switchdev_bl_nb;
bool is_switch_mode;
+ bool is_hsr_offload_mode;
bool is_switchmode_supported;
unsigned char switch_id[MAX_PHYS_ITEM_ID_LEN];
int default_vlan;
+ /** @vtbl_lock: Lock for vtbl in shared memory */
+ spinlock_t vtbl_lock;
};
struct emac_tx_ts_response {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index e180c1166170..292f04d29f4f 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -847,6 +847,7 @@ static int prueth_netdev_init(struct prueth *prueth,
}
ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+ ndev->dev.of_node = eth_node;
ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
ndev->max_mtu = PRUETH_MAX_MTU;
ndev->netdev_ops = &emac_netdev_ops;
@@ -1045,8 +1046,8 @@ static int prueth_probe(struct platform_device *pdev)
goto exit_iep;
}
- if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL))
- prueth->emac[PRUETH_MAC0]->half_duplex = 1;
+ prueth->emac[PRUETH_MAC0]->half_duplex =
+ of_property_read_bool(eth0_node, "ti,half-duplex-capable");
prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
}
@@ -1059,8 +1060,8 @@ static int prueth_probe(struct platform_device *pdev)
goto netdev_exit;
}
- if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL))
- prueth->emac[PRUETH_MAC1]->half_duplex = 1;
+ prueth->emac[PRUETH_MAC1]->half_duplex =
+ of_property_read_bool(eth1_node, "ti,half-duplex-capable");
prueth->emac[PRUETH_MAC1]->iep = prueth->iep1;
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index 2fb150c13078..8800bd3a8d07 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -11,6 +11,7 @@
#define ICSSG_TX_PACKET_OFFSET 0xA0
#define ICSSG_TX_BYTE_OFFSET 0xEC
+#define ICSSG_FW_STATS_BASE 0x0248
static u32 stats_base[] = { 0x54c, /* Slice 0 stats start */
0xb18, /* Slice 1 stats start */
@@ -22,24 +23,34 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
int slice = prueth_emac_slice(emac);
u32 base = stats_base[slice];
u32 tx_pkt_cnt = 0;
- u32 val;
+ u32 val, reg;
int i;
- for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) {
+ for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
regmap_read(prueth->miig_rt,
- base + icssg_all_stats[i].offset,
+ base + icssg_all_miig_stats[i].offset,
&val);
regmap_write(prueth->miig_rt,
- base + icssg_all_stats[i].offset,
+ base + icssg_all_miig_stats[i].offset,
val);
- if (icssg_all_stats[i].offset == ICSSG_TX_PACKET_OFFSET)
+ if (icssg_all_miig_stats[i].offset == ICSSG_TX_PACKET_OFFSET)
tx_pkt_cnt = val;
emac->stats[i] += val;
- if (icssg_all_stats[i].offset == ICSSG_TX_BYTE_OFFSET)
+ if (icssg_all_miig_stats[i].offset == ICSSG_TX_BYTE_OFFSET)
emac->stats[i] -= tx_pkt_cnt * 8;
}
+
+ if (prueth->pa_stats) {
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
+ reg = ICSSG_FW_STATS_BASE +
+ icssg_all_pa_stats[i].offset *
+ PRUETH_NUM_MACS + slice * sizeof(u32);
+ regmap_read(prueth->pa_stats, reg, &val);
+ emac->pa_stats[i] += val;
+ }
+ }
}
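[Editor's note: the firmware lays the PA counters out stat-major with the slices interleaved. A worked example of the addresses the loop above computes, assuming PRUETH_NUM_MACS == 2 (two MAC ports per ICSSG):

	/* reg = 0x248 + offset * 2 + slice * 4:
	 *   fw_rx_cnt          (offset  0): slice0 0x248, slice1 0x24c
	 *   fw_tx_cnt          (offset  4): slice0 0x250, slice1 0x254
	 *   fw_tx_pre_overflow (offset  8): slice0 0x258, slice1 0x25c
	 *   fw_tx_exp_overflow (offset 12): slice0 0x260, slice1 0x264
	 */
]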
void icssg_stats_work_handler(struct work_struct *work)
@@ -57,9 +68,16 @@ int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name)
{
int i;
- for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) {
- if (!strcmp(icssg_all_stats[i].name, stat_name))
- return emac->stats[icssg_all_stats[i].offset / sizeof(u32)];
+ for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
+ if (!strcmp(icssg_all_miig_stats[i].name, stat_name))
+ return emac->stats[icssg_all_miig_stats[i].offset / sizeof(u32)];
+ }
+
+ if (emac->prueth->pa_stats) {
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
+ if (!strcmp(icssg_all_pa_stats[i].name, stat_name))
+ return emac->pa_stats[icssg_all_pa_stats[i].offset / sizeof(u32)];
+ }
}
netdev_err(emac->ndev, "Invalid stats %s\n", stat_name);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.h b/drivers/net/ethernet/ti/icssg/icssg_stats.h
index 999a4a91276c..e88b919f532c 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.h
@@ -77,82 +77,114 @@ struct miig_stats_regs {
u32 tx_bytes;
};
-#define ICSSG_STATS(field, stats_type) \
+#define ICSSG_MIIG_STATS(field, stats_type) \
{ \
#field, \
offsetof(struct miig_stats_regs, field), \
stats_type \
}
-struct icssg_stats {
+struct icssg_miig_stats {
char name[ETH_GSTRING_LEN];
u32 offset;
bool standard_stats;
};
-static const struct icssg_stats icssg_all_stats[] = {
+static const struct icssg_miig_stats icssg_all_miig_stats[] = {
/* Rx */
- ICSSG_STATS(rx_packets, true),
- ICSSG_STATS(rx_broadcast_frames, false),
- ICSSG_STATS(rx_multicast_frames, true),
- ICSSG_STATS(rx_crc_errors, true),
- ICSSG_STATS(rx_mii_error_frames, false),
- ICSSG_STATS(rx_odd_nibble_frames, false),
- ICSSG_STATS(rx_frame_max_size, true),
- ICSSG_STATS(rx_max_size_error_frames, false),
- ICSSG_STATS(rx_frame_min_size, true),
- ICSSG_STATS(rx_min_size_error_frames, false),
- ICSSG_STATS(rx_over_errors, true),
- ICSSG_STATS(rx_class0_hits, false),
- ICSSG_STATS(rx_class1_hits, false),
- ICSSG_STATS(rx_class2_hits, false),
- ICSSG_STATS(rx_class3_hits, false),
- ICSSG_STATS(rx_class4_hits, false),
- ICSSG_STATS(rx_class5_hits, false),
- ICSSG_STATS(rx_class6_hits, false),
- ICSSG_STATS(rx_class7_hits, false),
- ICSSG_STATS(rx_class8_hits, false),
- ICSSG_STATS(rx_class9_hits, false),
- ICSSG_STATS(rx_class10_hits, false),
- ICSSG_STATS(rx_class11_hits, false),
- ICSSG_STATS(rx_class12_hits, false),
- ICSSG_STATS(rx_class13_hits, false),
- ICSSG_STATS(rx_class14_hits, false),
- ICSSG_STATS(rx_class15_hits, false),
- ICSSG_STATS(rx_smd_frags, false),
- ICSSG_STATS(rx_bucket1_size, true),
- ICSSG_STATS(rx_bucket2_size, true),
- ICSSG_STATS(rx_bucket3_size, true),
- ICSSG_STATS(rx_bucket4_size, true),
- ICSSG_STATS(rx_64B_frames, true),
- ICSSG_STATS(rx_bucket1_frames, true),
- ICSSG_STATS(rx_bucket2_frames, true),
- ICSSG_STATS(rx_bucket3_frames, true),
- ICSSG_STATS(rx_bucket4_frames, true),
- ICSSG_STATS(rx_bucket5_frames, true),
- ICSSG_STATS(rx_bytes, true),
- ICSSG_STATS(rx_tx_total_bytes, false),
+ ICSSG_MIIG_STATS(rx_packets, true),
+ ICSSG_MIIG_STATS(rx_broadcast_frames, false),
+ ICSSG_MIIG_STATS(rx_multicast_frames, true),
+ ICSSG_MIIG_STATS(rx_crc_errors, true),
+ ICSSG_MIIG_STATS(rx_mii_error_frames, false),
+ ICSSG_MIIG_STATS(rx_odd_nibble_frames, false),
+ ICSSG_MIIG_STATS(rx_frame_max_size, true),
+ ICSSG_MIIG_STATS(rx_max_size_error_frames, false),
+ ICSSG_MIIG_STATS(rx_frame_min_size, true),
+ ICSSG_MIIG_STATS(rx_min_size_error_frames, false),
+ ICSSG_MIIG_STATS(rx_over_errors, true),
+ ICSSG_MIIG_STATS(rx_class0_hits, false),
+ ICSSG_MIIG_STATS(rx_class1_hits, false),
+ ICSSG_MIIG_STATS(rx_class2_hits, false),
+ ICSSG_MIIG_STATS(rx_class3_hits, false),
+ ICSSG_MIIG_STATS(rx_class4_hits, false),
+ ICSSG_MIIG_STATS(rx_class5_hits, false),
+ ICSSG_MIIG_STATS(rx_class6_hits, false),
+ ICSSG_MIIG_STATS(rx_class7_hits, false),
+ ICSSG_MIIG_STATS(rx_class8_hits, false),
+ ICSSG_MIIG_STATS(rx_class9_hits, false),
+ ICSSG_MIIG_STATS(rx_class10_hits, false),
+ ICSSG_MIIG_STATS(rx_class11_hits, false),
+ ICSSG_MIIG_STATS(rx_class12_hits, false),
+ ICSSG_MIIG_STATS(rx_class13_hits, false),
+ ICSSG_MIIG_STATS(rx_class14_hits, false),
+ ICSSG_MIIG_STATS(rx_class15_hits, false),
+ ICSSG_MIIG_STATS(rx_smd_frags, false),
+ ICSSG_MIIG_STATS(rx_bucket1_size, true),
+ ICSSG_MIIG_STATS(rx_bucket2_size, true),
+ ICSSG_MIIG_STATS(rx_bucket3_size, true),
+ ICSSG_MIIG_STATS(rx_bucket4_size, true),
+ ICSSG_MIIG_STATS(rx_64B_frames, true),
+ ICSSG_MIIG_STATS(rx_bucket1_frames, true),
+ ICSSG_MIIG_STATS(rx_bucket2_frames, true),
+ ICSSG_MIIG_STATS(rx_bucket3_frames, true),
+ ICSSG_MIIG_STATS(rx_bucket4_frames, true),
+ ICSSG_MIIG_STATS(rx_bucket5_frames, true),
+ ICSSG_MIIG_STATS(rx_bytes, true),
+ ICSSG_MIIG_STATS(rx_tx_total_bytes, false),
/* Tx */
- ICSSG_STATS(tx_packets, true),
- ICSSG_STATS(tx_broadcast_frames, false),
- ICSSG_STATS(tx_multicast_frames, false),
- ICSSG_STATS(tx_odd_nibble_frames, false),
- ICSSG_STATS(tx_underflow_errors, false),
- ICSSG_STATS(tx_frame_max_size, true),
- ICSSG_STATS(tx_max_size_error_frames, false),
- ICSSG_STATS(tx_frame_min_size, true),
- ICSSG_STATS(tx_min_size_error_frames, false),
- ICSSG_STATS(tx_bucket1_size, true),
- ICSSG_STATS(tx_bucket2_size, true),
- ICSSG_STATS(tx_bucket3_size, true),
- ICSSG_STATS(tx_bucket4_size, true),
- ICSSG_STATS(tx_64B_frames, true),
- ICSSG_STATS(tx_bucket1_frames, true),
- ICSSG_STATS(tx_bucket2_frames, true),
- ICSSG_STATS(tx_bucket3_frames, true),
- ICSSG_STATS(tx_bucket4_frames, true),
- ICSSG_STATS(tx_bucket5_frames, true),
- ICSSG_STATS(tx_bytes, true),
+ ICSSG_MIIG_STATS(tx_packets, true),
+ ICSSG_MIIG_STATS(tx_broadcast_frames, false),
+ ICSSG_MIIG_STATS(tx_multicast_frames, false),
+ ICSSG_MIIG_STATS(tx_odd_nibble_frames, false),
+ ICSSG_MIIG_STATS(tx_underflow_errors, false),
+ ICSSG_MIIG_STATS(tx_frame_max_size, true),
+ ICSSG_MIIG_STATS(tx_max_size_error_frames, false),
+ ICSSG_MIIG_STATS(tx_frame_min_size, true),
+ ICSSG_MIIG_STATS(tx_min_size_error_frames, false),
+ ICSSG_MIIG_STATS(tx_bucket1_size, true),
+ ICSSG_MIIG_STATS(tx_bucket2_size, true),
+ ICSSG_MIIG_STATS(tx_bucket3_size, true),
+ ICSSG_MIIG_STATS(tx_bucket4_size, true),
+ ICSSG_MIIG_STATS(tx_64B_frames, true),
+ ICSSG_MIIG_STATS(tx_bucket1_frames, true),
+ ICSSG_MIIG_STATS(tx_bucket2_frames, true),
+ ICSSG_MIIG_STATS(tx_bucket3_frames, true),
+ ICSSG_MIIG_STATS(tx_bucket4_frames, true),
+ ICSSG_MIIG_STATS(tx_bucket5_frames, true),
+ ICSSG_MIIG_STATS(tx_bytes, true),
+};
+
+/**
+ * struct pa_stats_regs - ICSSG Firmware maintained PA Stats register
+ * @fw_rx_cnt: Number of valid packets sent by Rx PRU to Host on PSI
+ * @fw_tx_cnt: Number of valid packets copied by RTU0 to Tx queues
+ * @fw_tx_pre_overflow: Host Egress Q (Pre-emptible) Overflow Counter
+ * @fw_tx_exp_overflow: Host Egress Q (Express) Overflow Counter
+ */
+struct pa_stats_regs {
+ u32 fw_rx_cnt;
+ u32 fw_tx_cnt;
+ u32 fw_tx_pre_overflow;
+ u32 fw_tx_exp_overflow;
+};
+
+#define ICSSG_PA_STATS(field) \
+{ \
+ #field, \
+ offsetof(struct pa_stats_regs, field), \
+}
+
+struct icssg_pa_stats {
+ char name[ETH_GSTRING_LEN];
+ u32 offset;
+};
+
+static const struct icssg_pa_stats icssg_all_pa_stats[] = {
+ ICSSG_PA_STATS(fw_rx_cnt),
+ ICSSG_PA_STATS(fw_tx_cnt),
+ ICSSG_PA_STATS(fw_tx_pre_overflow),
+ ICSSG_PA_STATS(fw_tx_exp_overflow),
};
#endif /* __NET_TI_ICSSG_STATS_H */
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index d286709ca3b9..63e686f0b119 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2012,8 +2012,6 @@ static int keystone_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
info->tx_types =
@@ -2030,10 +2028,7 @@ static int keystone_get_ts_info(struct net_device *ndev,
struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = 0;
info->rx_filters = 0;
return 0;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 87e67121477c..a4937c18d7cb 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2277,10 +2277,11 @@ spider_net_setup_netdev(struct spider_net_card *card)
netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
if (SPIDER_NET_RX_CSUM_DEFAULT)
netdev->features |= NETIF_F_RXCSUM;
- netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX;
+ netdev->features |= NETIF_F_IP_CSUM;
/* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
* NETIF_F_HW_VLAN_CTAG_FILTER
*/
+ netdev->lltx = true;
/* MTU range: 64 - 2294 */
netdev->min_mtu = SPIDER_NET_MIN_MTU;
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index edd8b59680e5..a04d4073def9 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -377,8 +377,8 @@ static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb,
int ret;
bool first = true;
- if (txb->len < 60)
- pad = 60 - txb->len;
+ if (txb->len < ETH_ZLEN)
+ pad = ETH_ZLEN - txb->len;
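[Editor's note: same behaviour, clearer intent.

	/* ETH_ZLEN (<linux/if_ether.h>) is 60, the minimum Ethernet frame
	 * length excluding FCS; shorter payloads must be padded up to it
	 * before transmission.
	 */
]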
while (1) {
mse102x_tx_cmd_spi(mse, CMD_RTS | (txb->len + pad));
@@ -451,7 +451,7 @@ static void mse102x_tx_work(struct work_struct *work)
if (ret == -ETIMEDOUT) {
if (netif_msg_timer(mse))
- netdev_err(mse->ndev, "tx work timeout\n");
+ netdev_err_once(mse->ndev, "tx work timeout\n");
mse->stats.tx_timeout++;
}
@@ -485,8 +485,8 @@ static void mse102x_init_mac(struct mse102x_net *mse, struct device_node *np)
if (ret) {
eth_hw_addr_random(ndev);
- netdev_err(ndev, "Using random MAC address: %pM\n",
- ndev->dev_addr);
+ dev_warn(ndev->dev.parent, "Using random MAC address: %pM\n",
+ ndev->dev_addr);
}
}
@@ -622,8 +622,6 @@ static const struct ethtool_ops mse102x_ethtool_ops = {
/* driver bus management functions */
-#ifdef CONFIG_PM_SLEEP
-
static int mse102x_suspend(struct device *dev)
{
struct mse102x_net *mse = dev_get_drvdata(dev);
@@ -649,9 +647,8 @@ static int mse102x_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(mse102x_pm_ops, mse102x_suspend, mse102x_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(mse102x_pm_ops, mse102x_suspend, mse102x_resume);
static int mse102x_probe_spi(struct spi_device *spi)
{
@@ -736,9 +733,6 @@ static void mse102x_remove_spi(struct spi_device *spi)
struct mse102x_net *mse = dev_get_drvdata(&spi->dev);
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
- if (netif_msg_drv(mse))
- dev_info(&spi->dev, "remove\n");
-
mse102x_remove_device_debugfs(mses);
unregister_netdev(mse->ndev);
}
@@ -761,7 +755,7 @@ static struct spi_driver mse102x_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = mse102x_match_table,
- .pm = &mse102x_pm_ops,
+ .pm = pm_sleep_ptr(&mse102x_pm_ops),
},
.probe = mse102x_probe_spi,
.remove = mse102x_remove_spi,
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index 85cdbdd44fec..e46ccebcfd22 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -41,10 +41,9 @@ config TXGBE
tristate "Wangxun(R) 10GbE PCI Express adapters support"
depends on PCI
depends on COMMON_CLK
+ depends on I2C_DESIGNWARE_PLATFORM
select MARVELL_10G_PHY
select REGMAP
- select I2C
- select I2C_DESIGNWARE_PLATFORM
select PHYLINK
select HWMON if TXGBE=y
select SFP
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 1eecba984f3b..2b3d6586f44a 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -251,10 +251,7 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,
rx_buffer->page_offset;
/* prefetch first cache line of first page */
- prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
-#endif
+ net_prefetch(page_addr);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 1d57b047817b..b54bffda027b 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -426,9 +426,9 @@ enum WX_MSCA_CMD_value {
#define WX_MIN_RXD 128
#define WX_MIN_TXD 128
-/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
-#define WX_REQ_RX_DESCRIPTOR_MULTIPLE 8
-#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 8
+/* Number of Transmit and Receive Descriptors must be a multiple of 128 */
+#define WX_REQ_RX_DESCRIPTOR_MULTIPLE 128
+#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 128
#define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
#define VMDQ_P(p) p
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
index ec54b18c5fe7..a5e9b779c44d 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -124,8 +124,12 @@ static int ngbe_phylink_init(struct wx *wx)
MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
config->mac_managed_pm = true;
- phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
- __set_bit(PHY_INTERFACE_MODE_RGMII_ID, config->supported_interfaces);
+ /* The MAC can only add the Tx delay, and it cannot be modified.
+ * So just disable the Tx delay in the PHY; this does not matter
+ * for the internal PHY.
+ */
+ phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
+ __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, config->supported_interfaces);
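[Editor's note: the RGMII delay conventions this relies on, per the standard phy_interface_t semantics:

	/* PHY_INTERFACE_MODE_RGMII       PHY adds neither delay
	 * PHY_INTERFACE_MODE_RGMII_ID    PHY adds both RX and TX delay
	 * PHY_INTERFACE_MODE_RGMII_RXID  PHY adds the RX delay only
	 * PHY_INTERFACE_MODE_RGMII_TXID  PHY adds the TX delay only
	 *
	 * With the MAC inserting a fixed TX delay of its own, RGMII_RXID
	 * leaves the PHY responsible only for the RX delay.
	 */
]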
phylink = phylink_create(config, NULL, phy_mode, &ngbe_mac_ops);
if (IS_ERR(phylink))
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index d6b2b3c781b6..cd1372da92a9 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -103,8 +103,7 @@ static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum)
if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM)
*checksum += local_buffer[i];
- if (eeprom_ptrs)
- kvfree(eeprom_ptrs);
+ kvfree(eeprom_ptrs);
*checksum = TXGBE_EEPROM_SUM - *checksum;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 5f502265f0a6..67b61afdde96 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -688,8 +688,7 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe)
mii_bus->parent = &pdev->dev;
mii_bus->phy_mask = GENMASK(31, 1);
mii_bus->priv = wx;
- snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe-%x",
- (pdev->bus->number << 8) | pdev->devfn);
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe-%x", pci_dev_id(pdev));
ret = devm_mdiobus_register(&pdev->dev, mii_bus);
if (ret) {
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index fa5500decc96..d64b8abcf018 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -29,26 +29,26 @@
/* Configuration options */
/* Accept all incoming packets. Default: disabled (cleared) */
-#define XAE_OPTION_PROMISC (1 << 0)
+#define XAE_OPTION_PROMISC BIT(0)
/* Jumbo frame support for Tx & Rx. Default: disabled (cleared) */
-#define XAE_OPTION_JUMBO (1 << 1)
+#define XAE_OPTION_JUMBO BIT(1)
/* VLAN Rx & Tx frame support. Default: disabled (cleared) */
-#define XAE_OPTION_VLAN (1 << 2)
+#define XAE_OPTION_VLAN BIT(2)
/* Enable recognition of flow control frames on Rx. Default: enabled (set) */
-#define XAE_OPTION_FLOW_CONTROL (1 << 4)
+#define XAE_OPTION_FLOW_CONTROL BIT(4)
/* Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not
* stripped. Default: disabled (set)
*/
-#define XAE_OPTION_FCS_STRIP (1 << 5)
+#define XAE_OPTION_FCS_STRIP BIT(5)
/* Generate FCS field and add PAD automatically for outgoing frames.
* Default: enabled (set)
*/
-#define XAE_OPTION_FCS_INSERT (1 << 6)
+#define XAE_OPTION_FCS_INSERT BIT(6)
/* Enable Length/Type error checking for incoming frames. When this option is
* set, the MAC will filter frames that have a mismatched type/length field
@@ -56,13 +56,13 @@
* types of frames are encountered. When this option is cleared, the MAC will
* allow these types of frames to be received. Default: enabled (set)
*/
-#define XAE_OPTION_LENTYPE_ERR (1 << 7)
+#define XAE_OPTION_LENTYPE_ERR BIT(7)
/* Enable the transmitter. Default: enabled (set) */
-#define XAE_OPTION_TXEN (1 << 11)
+#define XAE_OPTION_TXEN BIT(11)
/* Enable the receiver. Default: enabled (set) */
-#define XAE_OPTION_RXEN (1 << 12)
+#define XAE_OPTION_RXEN BIT(12)
/* Default options set when device is initialized or reset */
#define XAE_OPTION_DEFAULTS \
@@ -156,22 +156,27 @@
#define XAE_TPID0_OFFSET 0x00000028 /* VLAN TPID0 register */
#define XAE_TPID1_OFFSET 0x0000002C /* VLAN TPID1 register */
#define XAE_PPST_OFFSET 0x00000030 /* PCS PMA Soft Temac Status Reg */
+#define XAE_STATS_OFFSET 0x00000200 /* Statistics counters */
#define XAE_RCW0_OFFSET 0x00000400 /* Rx Configuration Word 0 */
#define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */
#define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */
#define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */
-#define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */
-#define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */
+#define XAE_EMMC_OFFSET 0x00000410 /* MAC speed configuration */
+#define XAE_PHYC_OFFSET 0x00000414 /* RX Max Frame Configuration */
#define XAE_ID_OFFSET 0x000004F8 /* Identification register */
-#define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */
-#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */
-#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
-#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
+#define XAE_ABILITY_OFFSET 0x000004FC /* Ability Register offset */
+#define XAE_MDIO_MC_OFFSET 0x00000500 /* MDIO Setup */
+#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MDIO Control */
+#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MDIO Write Data */
+#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MDIO Read Data */
#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
-#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
+#define XAE_FMI_OFFSET 0x00000708 /* Frame Filter Control */
+#define XAE_FFE_OFFSET 0x0000070C /* Frame Filter Enable */
#define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */
#define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */
+#define XAE_AM0_OFFSET 0x00000750 /* Frame Filter Mask Value Bytes 3-0 */
+#define XAE_AM1_OFFSET 0x00000754 /* Frame Filter Mask Value Bytes 7-4 */
#define XAE_TX_VLAN_DATA_OFFSET 0x00004000 /* TX VLAN data table address */
#define XAE_RX_VLAN_DATA_OFFSET 0x00008000 /* RX VLAN data table address */
@@ -283,6 +288,16 @@
#define XAE_PHYC_SGLINKSPD_100 0x40000000 /* SGMII link 100 Mbit */
#define XAE_PHYC_SGLINKSPD_1000 0x80000000 /* SGMII link 1000 Mbit */
+/* Bit masks for Axi Ethernet ability register */
+#define XAE_ABILITY_PFC BIT(16)
+#define XAE_ABILITY_FRAME_FILTER BIT(10)
+#define XAE_ABILITY_HALF_DUPLEX BIT(9)
+#define XAE_ABILITY_STATS BIT(8)
+#define XAE_ABILITY_2_5G BIT(3)
+#define XAE_ABILITY_1G BIT(2)
+#define XAE_ABILITY_100M BIT(1)
+#define XAE_ABILITY_10M BIT(0)
+
/* Bit masks for Axi Ethernet MDIO interface MC register */
#define XAE_MDIO_MC_MDIOEN_MASK 0x00000040 /* MII management enable */
#define XAE_MDIO_MC_CLOCK_DIVIDE_MAX 0x3F /* Maximum MDIO divisor */
@@ -308,7 +323,7 @@
*/
#define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF
-/* Bit masks for Axi Ethernet FMI register */
+/* Bit masks for Axi Ethernet FMC register */
#define XAE_FMI_PM_MASK 0x80000000 /* Promis. mode enable */
#define XAE_FMI_IND_MASK 0x00000003 /* Index Mask */
@@ -326,11 +341,12 @@
#define XAE_MULTICAST_CAM_TABLE_NUM 4
/* Axi Ethernet Synthesis features */
-#define XAE_FEATURE_PARTIAL_RX_CSUM (1 << 0)
-#define XAE_FEATURE_PARTIAL_TX_CSUM (1 << 1)
-#define XAE_FEATURE_FULL_RX_CSUM (1 << 2)
-#define XAE_FEATURE_FULL_TX_CSUM (1 << 3)
-#define XAE_FEATURE_DMA_64BIT (1 << 4)
+#define XAE_FEATURE_PARTIAL_RX_CSUM BIT(0)
+#define XAE_FEATURE_PARTIAL_TX_CSUM BIT(1)
+#define XAE_FEATURE_FULL_RX_CSUM BIT(2)
+#define XAE_FEATURE_FULL_TX_CSUM BIT(3)
+#define XAE_FEATURE_DMA_64BIT BIT(4)
+#define XAE_FEATURE_STATS BIT(5)
#define XAE_NO_CSUM_OFFLOAD 0
@@ -344,6 +360,61 @@
#define XLNX_MII_STD_SELECT_REG 0x11
#define XLNX_MII_STD_SELECT_SGMII BIT(0)
+/* enum temac_stat - TEMAC statistics counters
+ *
+ * Index of statistics counters within the TEMAC. This must match the
+ * order/offset of hardware registers exactly.
+ */
+enum temac_stat {
+ STAT_RX_BYTES = 0,
+ STAT_TX_BYTES,
+ STAT_UNDERSIZE_FRAMES,
+ STAT_FRAGMENT_FRAMES,
+ STAT_RX_64_BYTE_FRAMES,
+ STAT_RX_65_127_BYTE_FRAMES,
+ STAT_RX_128_255_BYTE_FRAMES,
+ STAT_RX_256_511_BYTE_FRAMES,
+ STAT_RX_512_1023_BYTE_FRAMES,
+ STAT_RX_1024_MAX_BYTE_FRAMES,
+ STAT_RX_OVERSIZE_FRAMES,
+ STAT_TX_64_BYTE_FRAMES,
+ STAT_TX_65_127_BYTE_FRAMES,
+ STAT_TX_128_255_BYTE_FRAMES,
+ STAT_TX_256_511_BYTE_FRAMES,
+ STAT_TX_512_1023_BYTE_FRAMES,
+ STAT_TX_1024_MAX_BYTE_FRAMES,
+ STAT_TX_OVERSIZE_FRAMES,
+ STAT_RX_GOOD_FRAMES,
+ STAT_RX_FCS_ERRORS,
+ STAT_RX_BROADCAST_FRAMES,
+ STAT_RX_MULTICAST_FRAMES,
+ STAT_RX_CONTROL_FRAMES,
+ STAT_RX_LENGTH_ERRORS,
+ STAT_RX_VLAN_FRAMES,
+ STAT_RX_PAUSE_FRAMES,
+ STAT_RX_CONTROL_OPCODE_ERRORS,
+ STAT_TX_GOOD_FRAMES,
+ STAT_TX_BROADCAST_FRAMES,
+ STAT_TX_MULTICAST_FRAMES,
+ STAT_TX_UNDERRUN_ERRORS,
+ STAT_TX_CONTROL_FRAMES,
+ STAT_TX_VLAN_FRAMES,
+ STAT_TX_PAUSE_FRAMES,
+ STAT_TX_SINGLE_COLLISION_FRAMES,
+ STAT_TX_MULTIPLE_COLLISION_FRAMES,
+ STAT_TX_DEFERRED_FRAMES,
+ STAT_TX_LATE_COLLISIONS,
+ STAT_TX_EXCESS_COLLISIONS,
+ STAT_TX_EXCESS_DEFERRAL,
+ STAT_RX_ALIGNMENT_ERRORS,
+ STAT_TX_PFC_FRAMES,
+ STAT_RX_PFC_FRAMES,
+ STAT_USER_DEFINED0,
+ STAT_USER_DEFINED1,
+ STAT_USER_DEFINED2,
+ STAT_COUNT,
+};
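[Editor's note: each counter above maps to an 8-byte slot starting at XAE_STATS_OFFSET (0x200), as seen in the axienet_ior(lp, XAE_STATS_OFFSET + stat * 8) reads later in this patch; the driver reads the low 32 bits and widens them in software. A few worked addresses:

	/* XAE_STATS_OFFSET + stat * 8:
	 *   STAT_RX_BYTES         -> 0x200
	 *   STAT_TX_BYTES         -> 0x208
	 *   STAT_UNDERSIZE_FRAMES -> 0x210
	 *   STAT_USER_DEFINED2    -> 0x200 + 45 * 8 = 0x368
	 */
]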
+
/**
* struct axidma_bd - Axi Dma buffer descriptor layout
* @next: MM2S/S2MM Next Descriptor Pointer
@@ -434,7 +505,19 @@ struct skbuf_dma_descriptor {
* @tx_packets: TX packet count for statistics
* @tx_bytes: TX byte count for statistics
* @tx_stat_sync: Synchronization object for TX stats
+ * @hw_stat_base: Base offset for statistics counters. This may be nonzero if
+ * the statistics counters were reset or wrapped around.
+ * @hw_last_counter: Last-seen value of each statistic counter
+ * @reset_in_progress: Set while we are performing a reset and statistics
+ * counters may be invalid
+ * @hw_stats_seqcount: Sequence counter for @hw_stat_base, @hw_last_counter,
+ * and @reset_in_progress.
+ * @stats_lock: Lock for @hw_stats_seqcount
+ * @stats_work: Work for reading the hardware statistics counters often enough
+ * to catch overflows.
* @dma_err_task: Work structure to process Axi DMA errors
+ * @stopping: Set when @dma_err_task shouldn't do anything because we are
+ * about to stop the device.
* @tx_irq: Axidma TX IRQ number
* @rx_irq: Axidma RX IRQ number
* @eth_irq: Ethernet core IRQ number
@@ -446,8 +529,6 @@ struct skbuf_dma_descriptor {
* supported, the maximum frame size would be 9k. Else it is
* 1522 bytes (assuming support for basic VLAN)
* @rxmem: Stores rx memory size for jumbo frame handling.
- * @csum_offload_on_tx_path: Stores the checksum selection on TX side.
- * @csum_offload_on_rx_path: Stores the checksum selection on RX side.
* @coalesce_count_rx: Store the irq coalesce on RX side.
* @coalesce_usec_rx: IRQ coalesce delay for RX
* @coalesce_count_tx: Store the irq coalesce on TX side.
@@ -505,7 +586,15 @@ struct axienet_local {
u64_stats_t tx_bytes;
struct u64_stats_sync tx_stat_sync;
+ u64 hw_stat_base[STAT_COUNT];
+ u32 hw_last_counter[STAT_COUNT];
+ seqcount_mutex_t hw_stats_seqcount;
+ struct mutex stats_lock;
+ struct delayed_work stats_work;
+ bool reset_in_progress;
+
struct work_struct dma_err_task;
+ bool stopping;
int tx_irq;
int rx_irq;
@@ -518,9 +607,6 @@ struct axienet_local {
u32 max_frm_size;
u32 rxmem;
- int csum_offload_on_tx_path;
- int csum_offload_on_rx_path;
-
u32 coalesce_count_rx;
u32 coalesce_usec_rx;
u32 coalesce_count_tx;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 02fdf66e07fa..d940853acc0b 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -415,6 +415,7 @@ static void axienet_set_mac_address(struct net_device *ndev,
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
struct sockaddr *addr = p;
+
axienet_set_mac_address(ndev, addr->sa_data);
return 0;
}
@@ -432,25 +433,31 @@ static int netdev_set_mac_address(struct net_device *ndev, void *p)
*/
static void axienet_set_multicast_list(struct net_device *ndev)
{
- int i;
+ int i = 0;
u32 reg, af0reg, af1reg;
struct axienet_local *lp = netdev_priv(ndev);
- if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
- netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
- /* We must make the kernel realize we had to move into
- * promiscuous mode. If it was a promiscuous mode request
- * the flag is already set. If not we set it.
- */
- ndev->flags |= IFF_PROMISC;
- reg = axienet_ior(lp, XAE_FMI_OFFSET);
+ reg = axienet_ior(lp, XAE_FMI_OFFSET);
+ reg &= ~XAE_FMI_PM_MASK;
+ if (ndev->flags & IFF_PROMISC)
reg |= XAE_FMI_PM_MASK;
+ else
+ reg &= ~XAE_FMI_PM_MASK;
+ axienet_iow(lp, XAE_FMI_OFFSET, reg);
+
+ if (ndev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
+ reg &= 0xFFFFFF00;
axienet_iow(lp, XAE_FMI_OFFSET, reg);
- dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
+ axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
+ axienet_iow(lp, XAE_AF1_OFFSET, 0);
+ axienet_iow(lp, XAE_AM0_OFFSET, 1); /* mask: match only the multicast bit */
+ axienet_iow(lp, XAE_AM1_OFFSET, 0);
+ axienet_iow(lp, XAE_FFE_OFFSET, 1);
+ i = 1;
} else if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
- i = 0;
netdev_for_each_mc_addr(ha, ndev) {
if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
break;
@@ -463,30 +470,24 @@ static void axienet_set_multicast_list(struct net_device *ndev)
af1reg = (ha->addr[4]);
af1reg |= (ha->addr[5] << 8);
- reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+ reg &= 0xFFFFFF00;
reg |= i;
axienet_iow(lp, XAE_FMI_OFFSET, reg);
axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
+ axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
+ axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
+ axienet_iow(lp, XAE_FFE_OFFSET, 1);
i++;
}
- } else {
- reg = axienet_ior(lp, XAE_FMI_OFFSET);
- reg &= ~XAE_FMI_PM_MASK;
+ }
+ for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
+ reg &= 0xFFFFFF00;
+ reg |= i;
axienet_iow(lp, XAE_FMI_OFFSET, reg);
-
- for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
- reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
- reg |= i;
-
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
- axienet_iow(lp, XAE_AF0_OFFSET, 0);
- axienet_iow(lp, XAE_AF1_OFFSET, 0);
- }
-
- dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
+ axienet_iow(lp, XAE_FFE_OFFSET, 0);
}
}
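[Editor's note: the function above drives a small per-entry protocol on the frame-filter registers. A condensed sketch, with register roles inferred from this code rather than from a datasheet:

	/* A sketch, not driver code: program one frame-filter CAM entry. */
	static void cam_write(struct axienet_local *lp, unsigned int index,
			      u32 addr_lo, u32 addr_hi, u32 mask_lo,
			      u32 mask_hi, bool enable)
	{
		u32 reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;

		axienet_iow(lp, XAE_FMI_OFFSET, reg | index); /* select entry */
		if (enable) {
			axienet_iow(lp, XAE_AF0_OFFSET, addr_lo); /* address  */
			axienet_iow(lp, XAE_AF1_OFFSET, addr_hi);
			axienet_iow(lp, XAE_AM0_OFFSET, mask_lo); /* mask     */
			axienet_iow(lp, XAE_AM1_OFFSET, mask_hi);
		}
		axienet_iow(lp, XAE_FFE_OFFSET, enable ? 1 : 0);
	}

Disabling the leftover entries via FFE, rather than zeroing their addresses, is what prevents stale multicast matches from lingering in the CAM.]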
@@ -518,11 +519,55 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
lp->options |= options;
}
+static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
+{
+ u32 counter;
+
+ if (lp->reset_in_progress)
+ return lp->hw_stat_base[stat];
+
+ counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
+ return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
+}
+
+static void axienet_stats_update(struct axienet_local *lp, bool reset)
+{
+ enum temac_stat stat;
+
+ write_seqcount_begin(&lp->hw_stats_seqcount);
+ lp->reset_in_progress = reset;
+ for (stat = 0; stat < STAT_COUNT; stat++) {
+ u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
+
+ lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
+ lp->hw_last_counter[stat] = counter;
+ }
+ write_seqcount_end(&lp->hw_stats_seqcount);
+}
+
+static void axienet_refresh_stats(struct work_struct *work)
+{
+ struct axienet_local *lp = container_of(work, struct axienet_local,
+ stats_work.work);
+
+ mutex_lock(&lp->stats_lock);
+ axienet_stats_update(lp, false);
+ mutex_unlock(&lp->stats_lock);
+
+ /* Just less than 2^32 bytes at 2.5 GBit/s */
+ schedule_delayed_work(&lp->stats_work, 13 * HZ);
+}
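[Editor's note: the 13-second period is derived from the wrap time of the fastest counter; the arithmetic, spelled out:

	/* At 2.5 Gbit/s the byte counters advance by 2.5e9 / 8 = 312,500,000
	 * per second, so a u32 wraps after 2^32 / 312,500,000 ~= 13.7 s.
	 * Folding every 13 s therefore observes at most one wrap, which the
	 * u32 subtraction in axienet_stats_update() absorbs correctly.
	 */
]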
+
static int __axienet_device_reset(struct axienet_local *lp)
{
u32 value;
int ret;
+ /* Save statistics counters in case they will be reset */
+ mutex_lock(&lp->stats_lock);
+ if (lp->features & XAE_FEATURE_STATS)
+ axienet_stats_update(lp, true);
+
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
* commands/transfers will be flushed or completed during this
@@ -537,7 +582,7 @@ static int __axienet_device_reset(struct axienet_local *lp)
XAXIDMA_TX_CR_OFFSET);
if (ret) {
dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
- return ret;
+ goto out;
}
/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
@@ -547,10 +592,29 @@ static int __axienet_device_reset(struct axienet_local *lp)
XAE_IS_OFFSET);
if (ret) {
dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
- return ret;
+ goto out;
}
- return 0;
+ /* Update statistics counters with new values */
+ if (lp->features & XAE_FEATURE_STATS) {
+ enum temac_stat stat;
+
+ write_seqcount_begin(&lp->hw_stats_seqcount);
+ lp->reset_in_progress = false;
+ for (stat = 0; stat < STAT_COUNT; stat++) {
+ u32 counter =
+ axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
+
+ lp->hw_stat_base[stat] +=
+ lp->hw_last_counter[stat] - counter;
+ lp->hw_last_counter[stat] = counter;
+ }
+ write_seqcount_end(&lp->hw_stats_seqcount);
+ }
+
+out:
+ mutex_unlock(&lp->stats_lock);
+ return ret;
}
/**
@@ -613,8 +677,7 @@ static int axienet_device_reset(struct net_device *ndev)
lp->options |= XAE_OPTION_VLAN;
lp->options &= (~XAE_OPTION_JUMBO);
- if ((ndev->mtu > XAE_MTU) &&
- (ndev->mtu <= XAE_JUMBO_MTU)) {
+ if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
XAE_TRL_SIZE;
@@ -673,15 +736,15 @@ static int axienet_device_reset(struct net_device *ndev)
*
* Would either be called after a successful transmit operation, or after
* there was an error when setting up the chain.
- * Returns the number of descriptors handled.
+ * Returns the number of packets handled.
*/
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
int nr_bds, bool force, u32 *sizep, int budget)
{
struct axidma_bd *cur_p;
unsigned int status;
+ int i, packets = 0;
dma_addr_t phys;
- int i;
for (i = 0; i < nr_bds; i++) {
cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
@@ -700,8 +763,10 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
- if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
+ if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
napi_consume_skb(cur_p->skb, budget);
+ packets++;
+ }
cur_p->app0 = 0;
cur_p->app1 = 0;
@@ -717,7 +782,13 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
}
- return i;
+ if (!force) {
+ lp->tx_bd_ci += i;
+ if (lp->tx_bd_ci >= lp->tx_bd_num)
+ lp->tx_bd_ci %= lp->tx_bd_num;
+ }
+
+ return packets;
}
/**
@@ -890,13 +961,10 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
u32 size = 0;
int packets;
- packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
+ packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
+ &size, budget);
if (packets) {
- lp->tx_bd_ci += packets;
- if (lp->tx_bd_ci >= lp->tx_bd_num)
- lp->tx_bd_ci %= lp->tx_bd_num;
-
u64_stats_update_begin(&lp->tx_stat_sync);
u64_stats_add(&lp->tx_packets, packets);
u64_stats_add(&lp->tx_bytes, size);
@@ -983,6 +1051,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (net_ratelimit())
netdev_err(ndev, "TX DMA mapping error\n");
ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
desc_set_phys_addr(lp, phys, cur_p);
@@ -1003,6 +1072,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ndev->stats.tx_dropped++;
axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
true, NULL, 0);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
desc_set_phys_addr(lp, phys, cur_p);
@@ -1125,9 +1195,7 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget)
csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
- } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
- skb->protocol == htons(ETH_P_IP) &&
- skb->len > 64) {
+ } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
skb->ip_summed = CHECKSUM_COMPLETE;
}
@@ -1221,9 +1289,10 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
u32 cr = lp->tx_dma_cr;
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- napi_schedule(&lp->napi_tx);
+ if (napi_schedule_prep(&lp->napi_tx)) {
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ __napi_schedule(&lp->napi_tx);
+ }
}
return IRQ_HANDLED;
@@ -1265,9 +1334,10 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
u32 cr = lp->rx_dma_cr;
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- napi_schedule(&lp->napi_rx);
+ if (napi_schedule_prep(&lp->napi_rx)) {
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ __napi_schedule(&lp->napi_rx);
+ }
}
return IRQ_HANDLED;
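[Editor's note: both handlers now follow the same pattern: only the context that wins napi_schedule_prep() masks the device interrupt, so exactly one mask is paired with the unmask the poll routine performs on completion. In generic form (a sketch; mask_device_irqs() is a hypothetical helper, not a kernel API):

	if (napi_schedule_prep(&napi)) {
		mask_device_irqs(dev);  /* hypothetical helper */
		__napi_schedule(&napi);
	}
	/* If prep fails, NAPI is already scheduled and the IRQ stays
	 * masked by whichever context scheduled it. */
]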
@@ -1296,7 +1366,7 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
ndev->stats.rx_missed_errors++;
if (pending & XAE_INT_RXRJECT_MASK)
- ndev->stats.rx_frame_errors++;
+ ndev->stats.rx_dropped++;
axienet_iow(lp, XAE_IS_OFFSET, pending);
return IRQ_HANDLED;
@@ -1459,6 +1529,7 @@ static int axienet_init_legacy_dma(struct net_device *ndev)
struct axienet_local *lp = netdev_priv(ndev);
/* Enable worker thread for Axi DMA error handling */
+ lp->stopping = false;
INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
napi_enable(&lp->napi_rx);
@@ -1514,8 +1585,6 @@ static int axienet_open(struct net_device *ndev)
int ret;
struct axienet_local *lp = netdev_priv(ndev);
- dev_dbg(&ndev->dev, "%s\n", __func__);
-
/* When we do an Axi Ethernet reset, it resets the complete core
* including the MDIO. MDIO must be disabled before resetting.
* Hold MDIO bus lock to avoid MDIO accesses during the reset.
@@ -1532,6 +1601,9 @@ static int axienet_open(struct net_device *ndev)
phylink_start(lp->phylink);
+ /* Start the statistics refresh work */
+ schedule_delayed_work(&lp->stats_work, 0);
+
if (lp->use_dmaengine) {
/* Enable interrupts for Axi Ethernet core (if defined) */
if (lp->eth_irq > 0) {
@@ -1556,6 +1628,7 @@ err_free_eth_irq:
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
err_phy:
+ cancel_delayed_work_sync(&lp->stats_work);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
return ret;
@@ -1576,13 +1649,16 @@ static int axienet_stop(struct net_device *ndev)
struct axienet_local *lp = netdev_priv(ndev);
int i;
- dev_dbg(&ndev->dev, "axienet_close()\n");
-
if (!lp->use_dmaengine) {
+ WRITE_ONCE(lp->stopping, true);
+ flush_work(&lp->dma_err_task);
+
napi_disable(&lp->napi_tx);
napi_disable(&lp->napi_rx);
}
+ cancel_delayed_work_sync(&lp->stats_work);
+
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
@@ -1657,6 +1733,7 @@ static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
static void axienet_poll_controller(struct net_device *ndev)
{
struct axienet_local *lp = netdev_priv(ndev);
+
disable_irq(lp->tx_irq);
disable_irq(lp->rx_irq);
axienet_rx_irq(lp->tx_irq, ndev);
@@ -1695,6 +1772,35 @@ axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_packets = u64_stats_read(&lp->tx_packets);
stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
+
+ if (!(lp->features & XAE_FEATURE_STATS))
+ return;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ stats->rx_length_errors =
+ axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
+ stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
+ stats->rx_frame_errors =
+ axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
+ stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
+ axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
+ stats->rx_length_errors +
+ stats->rx_crc_errors +
+ stats->rx_frame_errors;
+ stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
+
+ stats->tx_aborted_errors =
+ axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
+ stats->tx_fifo_errors =
+ axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
+ stats->tx_window_errors =
+ axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
+ stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
+ stats->tx_aborted_errors +
+ stats->tx_fifo_errors +
+ stats->tx_window_errors;
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}
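[Editor's note: the snapshot loop above is one half of a seqcount_mutex_t discipline: writers serialize on lp->stats_lock and bump the sequence, while lockless readers retry if they observe a torn update. The two halves side by side, as a sketch of the pattern rather than new driver code:

	/* writer (refresh work, reset path), serialized by lp->stats_lock: */
	mutex_lock(&lp->stats_lock);
	write_seqcount_begin(&lp->hw_stats_seqcount);
	/* ... fold hw counters into hw_stat_base[] / hw_last_counter[] ... */
	write_seqcount_end(&lp->hw_stats_seqcount);
	mutex_unlock(&lp->stats_lock);

	/* lockless reader: */
	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		/* ... sample a consistent snapshot via axienet_stat() ... */
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
]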
static const struct net_device_ops axienet_netdev_ops = {
@@ -1987,6 +2093,213 @@ static int axienet_ethtools_nway_reset(struct net_device *dev)
return phylink_ethtool_nway_reset(lp->phylink);
}
+static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ data[0] = axienet_stat(lp, STAT_RX_BYTES);
+ data[1] = axienet_stat(lp, STAT_TX_BYTES);
+ data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
+ data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
+ data[6] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
+ data[7] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
+ data[8] = axienet_stat(lp, STAT_USER_DEFINED0);
+ data[9] = axienet_stat(lp, STAT_USER_DEFINED1);
+ data[10] = axienet_stat(lp, STAT_USER_DEFINED2);
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+}
+
+static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
+ "Received bytes",
+ "Transmitted bytes",
+ "RX Good VLAN Tagged Frames",
+ "TX Good VLAN Tagged Frames",
+ "TX Good PFC Frames",
+ "RX Good PFC Frames",
+ "User Defined Counter 0",
+ "User Defined Counter 1",
+ "User Defined Counter 2",
+};
+
+static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, axienet_ethtool_stats_strings,
+ sizeof(axienet_ethtool_stats_strings));
+ break;
+ }
+}
+
+static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ if (lp->features & XAE_FEATURE_STATS)
+ return ARRAY_SIZE(axienet_ethtool_stats_strings);
+ fallthrough;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+axienet_ethtools_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ if (!(lp->features & XAE_FEATURE_STATS))
+ return;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ pause_stats->tx_pause_frames =
+ axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
+ pause_stats->rx_pause_frames =
+ axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+}
+
+static void
+axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ if (!(lp->features & XAE_FEATURE_STATS))
+ return;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ mac_stats->FramesTransmittedOK =
+ axienet_stat(lp, STAT_TX_GOOD_FRAMES);
+ mac_stats->SingleCollisionFrames =
+ axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
+ mac_stats->MultipleCollisionFrames =
+ axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
+ mac_stats->FramesReceivedOK =
+ axienet_stat(lp, STAT_RX_GOOD_FRAMES);
+ mac_stats->FrameCheckSequenceErrors =
+ axienet_stat(lp, STAT_RX_FCS_ERRORS);
+ mac_stats->AlignmentErrors =
+ axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
+ mac_stats->FramesWithDeferredXmissions =
+ axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
+ mac_stats->LateCollisions =
+ axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
+ mac_stats->FramesAbortedDueToXSColls =
+ axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
+ mac_stats->MulticastFramesXmittedOK =
+ axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
+ mac_stats->BroadcastFramesXmittedOK =
+ axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
+ mac_stats->FramesWithExcessiveDeferral =
+ axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
+ mac_stats->MulticastFramesReceivedOK =
+ axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
+ mac_stats->BroadcastFramesReceivedOK =
+ axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
+ mac_stats->InRangeLengthErrors =
+ axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+}
+
+static void
+axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ if (!(lp->features & XAE_FEATURE_STATS))
+ return;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ ctrl_stats->MACControlFramesTransmitted =
+ axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
+ ctrl_stats->MACControlFramesReceived =
+ axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
+ ctrl_stats->UnsupportedOpcodesReceived =
+ axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+}
+
+static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 16384 },
+ { },
+};
+
+static void
+axienet_ethtool_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ if (!(lp->features & XAE_FEATURE_STATS))
+ return;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ rmon_stats->undersize_pkts =
+ axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
+ rmon_stats->oversize_pkts =
+ axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
+ rmon_stats->fragments =
+ axienet_stat(lp, STAT_FRAGMENT_FRAMES);
+
+ rmon_stats->hist[0] =
+ axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
+ rmon_stats->hist[1] =
+ axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
+ rmon_stats->hist[2] =
+ axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
+ rmon_stats->hist[3] =
+ axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
+ rmon_stats->hist[4] =
+ axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
+ rmon_stats->hist[5] =
+ axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
+ rmon_stats->hist[6] =
+ rmon_stats->oversize_pkts;
+
+ rmon_stats->hist_tx[0] =
+ axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
+ rmon_stats->hist_tx[1] =
+ axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
+ rmon_stats->hist_tx[2] =
+ axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
+ rmon_stats->hist_tx[3] =
+ axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
+ rmon_stats->hist_tx[4] =
+ axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
+ rmon_stats->hist_tx[5] =
+ axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
+ rmon_stats->hist_tx[6] =
+ axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+
+ *ranges = axienet_rmon_ranges;
+}
+
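
The ethtool callbacks above sample many 64-bit hardware counters under
lp->hw_stats_seqcount: readers retry until they observe a snapshot with no
refresh interleaved, while the periodic refresh worker writes under the
paired mutex. A minimal sketch of the same reader/writer pattern follows;
all names are hypothetical, and the seqcount is assumed to have been set up
once with seqcount_mutex_init(&demo_seq, &demo_lock).

#include <linux/mutex.h>
#include <linux/seqlock.h>

static DEFINE_MUTEX(demo_lock);
static seqcount_mutex_t demo_seq;
static u64 demo_counter;

/* Writer: serialized by demo_lock; bumps the sequence around the update */
static void demo_refresh(void)
{
	mutex_lock(&demo_lock);
	write_seqcount_begin(&demo_seq);
	demo_counter++;		/* stands in for re-reading hardware counters */
	write_seqcount_end(&demo_seq);
	mutex_unlock(&demo_lock);
}

/* Reader: lockless; loops until no writer ran during the reads */
static u64 demo_read(void)
{
	unsigned int start;
	u64 val;

	do {
		start = read_seqcount_begin(&demo_seq);
		val = demo_counter;
	} while (read_seqcount_retry(&demo_seq, start));

	return val;
}
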
static const struct ethtool_ops axienet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USECS,
@@ -2003,6 +2316,13 @@ static const struct ethtool_ops axienet_ethtool_ops = {
.get_link_ksettings = axienet_ethtools_get_link_ksettings,
.set_link_ksettings = axienet_ethtools_set_link_ksettings,
.nway_reset = axienet_ethtools_nway_reset,
+ .get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
+ .get_strings = axienet_ethtools_get_strings,
+ .get_sset_count = axienet_ethtools_get_sset_count,
+ .get_pause_stats = axienet_ethtools_get_pause_stats,
+ .get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
+ .get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
+ .get_rmon_stats = axienet_ethtool_get_rmon_stats,
};
static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
@@ -2153,6 +2473,10 @@ static void axienet_dma_err_handler(struct work_struct *work)
dma_err_task);
struct net_device *ndev = lp->ndev;
+ /* Don't bother if we are going to stop anyway */
+ if (READ_ONCE(lp->stopping))
+ return;
+
napi_disable(&lp->napi_tx);
napi_disable(&lp->napi_rx);
@@ -2271,6 +2595,10 @@ static int axienet_probe(struct platform_device *pdev)
u64_stats_init(&lp->rx_stat_sync);
u64_stats_init(&lp->tx_stat_sync);
+ mutex_init(&lp->stats_lock);
+ seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
+ INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
+
lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
if (!lp->axi_clk) {
/* For backward compatibility, if named AXI clock is not present,
@@ -2311,42 +2639,35 @@ static int axienet_probe(struct platform_device *pdev)
/* Setup checksum offload, but default to off if not specified */
lp->features = 0;
+ if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
+ lp->features |= XAE_FEATURE_STATS;
+
ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
if (!ret) {
switch (value) {
case 1:
- lp->csum_offload_on_tx_path =
- XAE_FEATURE_PARTIAL_TX_CSUM;
lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
- /* Can checksum TCP/UDP over IPv4. */
- ndev->features |= NETIF_F_IP_CSUM;
+ /* Can checksum any contiguous range */
+ ndev->features |= NETIF_F_HW_CSUM;
break;
case 2:
- lp->csum_offload_on_tx_path =
- XAE_FEATURE_FULL_TX_CSUM;
lp->features |= XAE_FEATURE_FULL_TX_CSUM;
/* Can checksum TCP/UDP over IPv4. */
ndev->features |= NETIF_F_IP_CSUM;
break;
- default:
- lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
}
}
ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
if (!ret) {
switch (value) {
case 1:
- lp->csum_offload_on_rx_path =
- XAE_FEATURE_PARTIAL_RX_CSUM;
lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
+ ndev->features |= NETIF_F_RXCSUM;
break;
case 2:
- lp->csum_offload_on_rx_path =
- XAE_FEATURE_FULL_RX_CSUM;
lp->features |= XAE_FEATURE_FULL_RX_CSUM;
+ ndev->features |= NETIF_F_RXCSUM;
break;
- default:
- lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
}
}
/* For supporting jumbo frames, the Axi Ethernet hardware must have
@@ -2396,7 +2717,7 @@ static int axienet_probe(struct platform_device *pdev)
goto cleanup_clk;
}
- if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) {
+ if (!of_property_present(pdev->dev.of_node, "dmas")) {
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 56df37f8d50a..aef316278eb4 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1026,9 +1026,7 @@ static int ixp4xx_get_ts_info(struct net_device *dev,
if (info->phc_index < 0) {
info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
info->so_timestamping =
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 838e85ddec67..7f611c74eb62 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1194,7 +1194,6 @@ static void geneve_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &geneve_type);
- dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
@@ -1215,6 +1214,7 @@ static void geneve_setup(struct net_device *dev)
netif_keep_dst(dev);
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
+ dev->lltx = true;
eth_hw_addr_random(dev);
}
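
Several conversions in this series (geneve, gtp, bpqether, ipvlan, macvlan,
macsec, netkit, nlmon, loopback, net_failover) follow the same pattern: the
lockless-transmit hint moves out of the NETIF_F_LLTX feature bit, which was
visible and toggleable via ethtool, into the dedicated dev->lltx boolean.
A before/after sketch in a hypothetical setup callback:

/* Before: lockless TX advertised as a feature flag */
static void demo_setup_old(struct net_device *dev)
{
	dev->features |= NETIF_F_SG | NETIF_F_LLTX;
}

/* After: real offloads stay in dev->features; the LLTX property is a
 * plain field that userspace can no longer flip.
 */
static void demo_setup_new(struct net_device *dev)
{
	dev->features |= NETIF_F_SG;
	dev->lltx = true;
}
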
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 427b91aca50d..a60bfb1abb7f 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1269,6 +1269,9 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb_cow_head(skb, dev->needed_headroom))
goto tx_err;
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+
skb_reset_inner_headers(skb);
/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
@@ -1353,7 +1356,7 @@ static void gtp_link_setup(struct net_device *dev)
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->priv_flags |= IFF_NO_QUEUE;
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
netif_keep_dst(dev);
dev->needed_headroom = LL_MAX_HEADER + GTP_IPV4_MAXLEN;
@@ -1650,7 +1653,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
sock = sockfd_lookup(fd, &err);
if (!sock) {
pr_debug("gtp socket fd=%d not found\n", fd);
- return NULL;
+ return ERR_PTR(err);
}
sk = sock->sk;
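
Returning ERR_PTR(err) instead of NULL lets callers of
gtp_encap_enable_socket() see why the socket lookup failed rather than a
bare NULL. A sketch of the convention from a hypothetical caller's side:

struct sock *sk = gtp_encap_enable_socket(fd, type, gtp);

/* the errno is now encoded in the pointer itself */
if (IS_ERR(sk))
	return PTR_ERR(sk);	/* e.g. -EBADF from sockfd_lookup() */
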
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 6ed38a3cdd73..3bf6785f9057 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -37,8 +37,6 @@
#include <linux/semaphore.h>
#include <linux/refcount.h>
-#define SIXPACK_VERSION "Revision: 0.3.0"
-
/* sixpack priority commands */
#define SIXP_SEOF 0x40 /* start and end of a 6pack frame */
#define SIXP_TX_URUN 0x48 /* transmit overrun */
@@ -88,22 +86,18 @@ struct sixpack {
struct net_device *dev; /* easy for intr handling */
/* These are pointers to the malloc()ed frame buffers. */
- unsigned char *rbuff; /* receiver buffer */
int rcount; /* received chars counter */
unsigned char *xbuff; /* transmitter buffer */
unsigned char *xhead; /* next byte to XMIT */
int xleft; /* bytes left in XMIT queue */
- unsigned char raw_buf[4];
- unsigned char cooked_buf[400];
+ u8 raw_buf[4];
+ u8 cooked_buf[400];
unsigned int rx_count;
unsigned int rx_count_cooked;
spinlock_t rxlock;
- int mtu; /* Our mtu (to spot changes!) */
- int buffsize; /* Max buffers sizes */
-
unsigned long flags; /* Flag values/ mode etc */
unsigned char mode; /* 6pack mode */
@@ -113,8 +107,8 @@ struct sixpack {
unsigned char slottime;
unsigned char duplex;
unsigned char led_state;
- unsigned char status;
- unsigned char status1;
+ u8 status;
+ u8 status1;
unsigned char status2;
unsigned char tx_enable;
unsigned char tnc_state;
@@ -128,7 +122,7 @@ struct sixpack {
#define AX25_6PACK_HEADER_LEN 0
-static void sixpack_decode(struct sixpack *, const unsigned char[], int);
+static void sixpack_decode(struct sixpack *, const u8 *, size_t);
static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char);
/*
@@ -167,7 +161,7 @@ static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len)
unsigned char *msg, *p = icp;
int actual, count;
- if (len > sp->mtu) { /* sp->mtu = AX25_MTU = max. PACLEN = 256 */
+ if (len > AX25_MTU + 73) {
msg = "oversized transmit packet!";
goto out_drop;
}
@@ -333,7 +327,7 @@ static void sp_bump(struct sixpack *sp, char cmd)
{
struct sk_buff *skb;
int count;
- unsigned char *ptr;
+ u8 *ptr;
count = sp->rcount + 1;
@@ -431,7 +425,7 @@ static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp,
const u8 *fp, size_t count)
{
struct sixpack *sp;
- int count1;
+ size_t count1;
if (!count)
return;
@@ -544,7 +538,7 @@ static inline int tnc_init(struct sixpack *sp)
*/
static int sixpack_open(struct tty_struct *tty)
{
- char *rbuff = NULL, *xbuff = NULL;
+ char *xbuff = NULL;
struct net_device *dev;
struct sixpack *sp;
unsigned long len;
@@ -574,10 +568,8 @@ static int sixpack_open(struct tty_struct *tty)
len = dev->mtu * 2;
- rbuff = kmalloc(len + 4, GFP_KERNEL);
xbuff = kmalloc(len + 4, GFP_KERNEL);
-
- if (rbuff == NULL || xbuff == NULL) {
+ if (xbuff == NULL) {
err = -ENOBUFS;
goto out_free;
}
@@ -586,11 +578,8 @@ static int sixpack_open(struct tty_struct *tty)
sp->tty = tty;
- sp->rbuff = rbuff;
sp->xbuff = xbuff;
- sp->mtu = AX25_MTU + 73;
- sp->buffsize = len;
sp->rcount = 0;
sp->rx_count = 0;
sp->rx_count_cooked = 0;
@@ -631,7 +620,6 @@ static int sixpack_open(struct tty_struct *tty)
out_free:
kfree(xbuff);
- kfree(rbuff);
free_netdev(dev);
@@ -676,7 +664,6 @@ static void sixpack_close(struct tty_struct *tty)
del_timer_sync(&sp->resync_t);
/* Free all 6pack frame buffers after unreg. */
- kfree(sp->rbuff);
kfree(sp->xbuff);
free_netdev(sp->dev);
@@ -756,21 +743,14 @@ static struct tty_ldisc_ops sp_ldisc = {
/* Initialize 6pack control device -- register 6pack line discipline */
-static const char msg_banner[] __initconst = KERN_INFO \
- "AX.25: 6pack driver, " SIXPACK_VERSION "\n";
-static const char msg_regfail[] __initconst = KERN_ERR \
- "6pack: can't register line discipline (err = %d)\n";
-
static int __init sixpack_init_driver(void)
{
int status;
- printk(msg_banner);
-
/* Register the provided line protocol discipline */
status = tty_register_ldisc(&sp_ldisc);
if (status)
- printk(msg_regfail, status);
+ pr_err("6pack: can't register line discipline (err = %d)\n", status);
return status;
}
@@ -820,9 +800,9 @@ static int encode_sixpack(unsigned char *tx_buf, unsigned char *tx_buf_raw,
/* decode 4 sixpack-encoded bytes into 3 data bytes */
-static void decode_data(struct sixpack *sp, unsigned char inbyte)
+static void decode_data(struct sixpack *sp, u8 inbyte)
{
- unsigned char *buf;
+ u8 *buf;
if (sp->rx_count != 3) {
sp->raw_buf[sp->rx_count++] = inbyte;
@@ -848,9 +828,9 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
/* identify and execute a 6pack priority command byte */
-static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
+static void decode_prio_command(struct sixpack *sp, u8 cmd)
{
- int actual;
+ ssize_t actual;
if ((cmd & SIXP_PRIO_DATA_MASK) != 0) { /* idle ? */
@@ -898,9 +878,9 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
/* identify and execute a standard 6pack command byte */
-static void decode_std_command(struct sixpack *sp, unsigned char cmd)
+static void decode_std_command(struct sixpack *sp, u8 cmd)
{
- unsigned char checksum = 0, rest = 0;
+ u8 checksum = 0, rest = 0;
short i;
switch (cmd & SIXP_CMD_MASK) { /* normal command */
@@ -948,10 +928,10 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
/* decode a 6pack packet */
static void
-sixpack_decode(struct sixpack *sp, const unsigned char *pre_rbuff, int count)
+sixpack_decode(struct sixpack *sp, const u8 *pre_rbuff, size_t count)
{
- unsigned char inbyte;
- int count1;
+ size_t count1;
+ u8 inbyte;
for (count1 = 0; count1 < count; count1++) {
inbyte = pre_rbuff[count1];
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 83a16d10eedb..bac1bb69d63a 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -458,7 +458,7 @@ static void bpq_setup(struct net_device *dev)
dev->needs_free_netdev = true;
dev->flags = 0;
- dev->features = NETIF_F_LLTX; /* Allow recursion */
+ dev->lltx = true; /* Allow recursion */
#if IS_ENABLED(CONFIG_AX25)
dev->header_ops = &ax25_header_ops;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 810977952f95..e690b95b1bbb 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -882,7 +882,7 @@ struct nvsp_message {
#define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */
#define VRSS_CHANNEL_MAX 64
-#define VRSS_CHANNEL_DEFAULT 8
+#define VRSS_CHANNEL_DEFAULT 16
#define RNDIS_MAX_PKT_DEFAULT 8
#define RNDIS_PKT_ALIGN_DEFAULT 8
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index 4a9522689fa4..e01c5997a551 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -183,7 +183,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
xdp.command = XDP_SETUP_PROG;
xdp.prog = prog;
- ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp);
+ ret = dev_xdp_propagate(vf_netdev, &xdp);
if (ret && prog)
bpf_prog_put(prog);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 44142245343d..153b97f8ec0d 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -987,7 +987,8 @@ struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
dev_info->bprog = prog;
}
} else {
- dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
+ dev_info->num_chn = max(VRSS_CHANNEL_DEFAULT,
+ netif_get_num_default_rss_queues());
dev_info->send_sections = NETVSC_DEFAULT_TX;
dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
dev_info->recv_sections = NETVSC_DEFAULT_RX;
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 95da876c5613..1075e24b11de 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -101,6 +101,7 @@ config IEEE802154_CA8210_DEBUGFS
config IEEE802154_MCR20A
tristate "MCR20A transceiver driver"
+ select REGMAP_SPI
depends on IEEE802154_DRIVERS && MAC802154
depends on SPI
help
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index a94d8dd71aad..2b7034193a00 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -16,7 +16,7 @@
#include <linux/skbuff.h>
#include <linux/ieee802154.h>
#include <linux/crc-ccitt.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/mac802154.h>
#include <net/cfg802154.h>
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index 433fb5839203..020d392a98b6 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -1302,16 +1302,13 @@ mcr20a_probe(struct spi_device *spi)
irq_type = IRQF_TRIGGER_FALLING;
ret = devm_request_irq(&spi->dev, spi->irq, mcr20a_irq_isr,
- irq_type, dev_name(&spi->dev), lp);
+ irq_type | IRQF_NO_AUTOEN, dev_name(&spi->dev), lp);
if (ret) {
dev_err(&spi->dev, "could not request_irq for mcr20a\n");
ret = -ENODEV;
goto free_dev;
}
- /* disable_irq by default and wait for starting hardware */
- disable_irq(spi->irq);
-
ret = ieee802154_register_hw(hw);
if (ret) {
dev_crit(&spi->dev, "ieee802154_register_hw failed\n");
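
Requesting the interrupt with IRQF_NO_AUTOEN installs the handler but
leaves the line masked, which closes the window in which the handler could
run before the hardware is started and replaces the request-then-disable_irq()
dance removed above. A minimal sketch with hypothetical names:

#include <linux/interrupt.h>
#include <linux/spi/spi.h>

static irqreturn_t demo_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct spi_device *spi)
{
	int ret;

	/* installed but masked: cannot fire before the device is up */
	ret = devm_request_irq(&spi->dev, spi->irq, demo_isr,
			       IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN,
			       dev_name(&spi->dev), NULL);
	if (ret)
		return ret;

	/* the start path later calls enable_irq(spi->irq) once ready */
	return 0;
}
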
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index 65fd14da0f86..c572da9e9bc4 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -242,11 +242,8 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
int ret;
clk = clk_get(dev, "core");
- if (IS_ERR(clk)) {
- dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n");
-
- return ERR_CAST(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_cast_probe(dev, clk, "error getting core clock\n");
ret = clk_set_rate(clk, data->core_clock_rate);
if (ret) {
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index fef4eff7753a..b1afcb8740de 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
*/
+#include <net/inet_dscp.h>
+
#include "ipvlan.h"
static u32 ipvlan_jhash_secret __read_mostly;
@@ -420,7 +422,7 @@ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
int err, ret = NET_XMIT_DROP;
struct flowi4 fl4 = {
.flowi4_oif = dev->ifindex,
- .flowi4_tos = RT_TOS(ip4h->tos),
+ .flowi4_tos = ip4h->tos & INET_DSCP_MASK,
.flowi4_flags = FLOWI_FLAG_ANYSRC,
.flowi4_mark = skb->mark,
.daddr = ip4h->daddr,
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 094f44dac5c8..ee2c3cf4df36 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -114,7 +114,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL)
#define IPVLAN_ALWAYS_ON \
- (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED)
+ (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_VLAN_CHALLENGED)
#define IPVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
@@ -141,6 +141,7 @@ static int ipvlan_init(struct net_device *dev)
dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
dev->hw_enc_features |= dev->features;
+ dev->lltx = true;
netif_inherit_tso_max(dev, phy_dev);
dev->hard_header_len = phy_dev->hard_header_len;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 2b486e7c749c..1993b90b1a5f 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -171,6 +171,8 @@ static void gen_lo_setup(struct net_device *dev,
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
+ dev->lltx = true;
+ dev->netns_local = true;
netif_keep_dst(dev);
dev->hw_features = NETIF_F_GSO_SOFTWARE;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
@@ -179,8 +181,6 @@ static void gen_lo_setup(struct net_device *dev,
| NETIF_F_RXCSUM
| NETIF_F_SCTP_CRC
| NETIF_F_HIGHDMA
- | NETIF_F_LLTX
- | NETIF_F_NETNS_LOCAL
| NETIF_F_VLAN_CHALLENGED
| NETIF_F_LOOPBACK;
dev->ethtool_ops = eth_ops;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 2da70bc3dd86..26034f80d4a4 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -154,19 +154,6 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
return sa;
}
-static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
-{
- struct macsec_rx_sa *sa = NULL;
- int an;
-
- for (an = 0; an < MACSEC_NUM_AN; an++) {
- sa = macsec_rxsa_get(rx_sc->sa[an]);
- if (sa)
- break;
- }
- return sa;
-}
-
static void free_rx_sc_rcu(struct rcu_head *head)
{
struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
@@ -1208,15 +1195,12 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
/* If validateFrames is Strict or the C bit in the
* SecTAG is set, discard
*/
- struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
if (hdr->tci_an & MACSEC_TCI_C ||
secy->validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotUsingSA++;
u64_stats_update_end(&rxsc_stats->syncp);
DEV_STATS_INC(secy->netdev, rx_errors);
- if (active_rx_sa)
- this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
goto drop_nosa;
}
@@ -1226,8 +1210,6 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsUnusedSA++;
u64_stats_update_end(&rxsc_stats->syncp);
- if (active_rx_sa)
- this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
goto deliver;
}
@@ -3550,7 +3532,8 @@ static int macsec_dev_init(struct net_device *dev)
return err;
dev->features = real_dev->features & MACSEC_FEATURES;
- dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->lltx = true;
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
macsec_set_head_tail_room(dev);
@@ -3581,7 +3564,6 @@ static netdev_features_t macsec_fix_features(struct net_device *dev,
features &= (real_dev->features & MACSEC_FEATURES) |
NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
- features |= NETIF_F_LLTX;
return features;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 24298a33e0e9..cf18e66de142 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -900,7 +900,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \
NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL)
-#define ALWAYS_ON_FEATURES (ALWAYS_ON_OFFLOADS | NETIF_F_LLTX)
+#define ALWAYS_ON_FEATURES ALWAYS_ON_OFFLOADS
#define MACVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
@@ -932,6 +932,7 @@ static int macvlan_init(struct net_device *dev)
dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
dev->vlan_features |= ALWAYS_ON_OFFLOADS;
dev->hw_enc_features |= dev->features;
+ dev->lltx = true;
netif_inherit_tso_max(dev, lowerdev);
dev->hard_header_len = lowerdev->hard_header_len;
macvlan_set_lockdep_class(dev);
@@ -1213,7 +1214,8 @@ void macvlan_common_setup(struct net_device *dev)
dev->max_mtu = ETH_MAX_MTU;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
netif_keep_dst(dev);
- dev->priv_flags |= IFF_UNICAST_FLT | IFF_CHANGE_PROTO_DOWN;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->change_proto_down = true;
dev->netdev_ops = &macvlan_netdev_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = macvlan_dev_free;
diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig
index ce9d2d2ccf3b..15860d6ac39f 100644
--- a/drivers/net/mctp/Kconfig
+++ b/drivers/net/mctp/Kconfig
@@ -21,6 +21,11 @@ config MCTP_SERIAL
Say y here if you need to connect to MCTP endpoints over serial. To
compile as a module, use m; the module will be called mctp-serial.
+config MCTP_SERIAL_TEST
+ bool "MCTP serial tests" if !KUNIT_ALL_TESTS
+ depends on MCTP_SERIAL=y && KUNIT=y
+ default KUNIT_ALL_TESTS
+
config MCTP_TRANSPORT_I2C
tristate "MCTP SMBus/I2C transport"
# i2c-mux is optional, but we must build as a module if i2c-mux is a module
diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c
index 8e989c157caa..1bc87a062686 100644
--- a/drivers/net/mctp/mctp-i3c.c
+++ b/drivers/net/mctp/mctp-i3c.c
@@ -13,7 +13,7 @@
#include <linux/i3c/device.h>
#include <linux/i3c/master.h>
#include <linux/if_arp.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/mctp.h>
#include <net/mctpdevice.h>
diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
index 5bf6fdff701c..e63720ec3238 100644
--- a/drivers/net/mctp/mctp-serial.c
+++ b/drivers/net/mctp/mctp-serial.c
@@ -64,18 +64,18 @@ struct mctp_serial {
u16 txfcs, rxfcs, rxfcs_rcvd;
unsigned int txlen, rxlen;
unsigned int txpos, rxpos;
- unsigned char txbuf[BUFSIZE],
+ u8 txbuf[BUFSIZE],
rxbuf[BUFSIZE];
};
-static bool needs_escape(unsigned char c)
+static bool needs_escape(u8 c)
{
return c == BYTE_ESC || c == BYTE_FRAME;
}
-static int next_chunk_len(struct mctp_serial *dev)
+static unsigned int next_chunk_len(struct mctp_serial *dev)
{
- int i;
+ unsigned int i;
/* either we have no bytes to send ... */
if (dev->txpos == dev->txlen)
@@ -91,15 +91,15 @@ static int next_chunk_len(struct mctp_serial *dev)
* will be those non-escaped bytes, and does not include the escaped
* byte.
*/
- for (i = 1; i + dev->txpos + 1 < dev->txlen; i++) {
- if (needs_escape(dev->txbuf[dev->txpos + i + 1]))
+ for (i = 1; i + dev->txpos < dev->txlen; i++) {
+ if (needs_escape(dev->txbuf[dev->txpos + i]))
break;
}
return i;
}
-static int write_chunk(struct mctp_serial *dev, unsigned char *buf, int len)
+static ssize_t write_chunk(struct mctp_serial *dev, u8 *buf, size_t len)
{
return dev->tty->ops->write(dev->tty, buf, len);
}
@@ -108,9 +108,10 @@ static void mctp_serial_tx_work(struct work_struct *work)
{
struct mctp_serial *dev = container_of(work, struct mctp_serial,
tx_work);
- unsigned char c, buf[3];
unsigned long flags;
- int len, txlen;
+ ssize_t txlen;
+ unsigned int len;
+ u8 c, buf[3];
spin_lock_irqsave(&dev->lock, flags);
@@ -293,7 +294,7 @@ static void mctp_serial_rx(struct mctp_serial *dev)
dev->netdev->stats.rx_bytes += dev->rxlen;
}
-static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c)
+static void mctp_serial_push_header(struct mctp_serial *dev, u8 c)
{
switch (dev->rxpos) {
case 0:
@@ -323,7 +324,7 @@ static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c)
}
}
-static void mctp_serial_push_trailer(struct mctp_serial *dev, unsigned char c)
+static void mctp_serial_push_trailer(struct mctp_serial *dev, u8 c)
{
switch (dev->rxpos) {
case 0:
@@ -347,7 +348,7 @@ static void mctp_serial_push_trailer(struct mctp_serial *dev, unsigned char c)
}
}
-static void mctp_serial_push(struct mctp_serial *dev, unsigned char c)
+static void mctp_serial_push(struct mctp_serial *dev, u8 c)
{
switch (dev->rxstate) {
case STATE_IDLE:
@@ -394,7 +395,7 @@ static void mctp_serial_tty_receive_buf(struct tty_struct *tty, const u8 *c,
const u8 *f, size_t len)
{
struct mctp_serial *dev = tty->disc_data;
- int i;
+ size_t i;
if (!netif_running(dev->netdev))
return;
@@ -521,3 +522,112 @@ module_exit(mctp_serial_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>");
MODULE_DESCRIPTION("MCTP Serial transport");
+
+#if IS_ENABLED(CONFIG_MCTP_SERIAL_TEST)
+#include <kunit/test.h>
+
+#define MAX_CHUNKS 6
+struct test_chunk_tx {
+ u8 input_len;
+ u8 input[MCTP_SERIAL_MTU];
+ u8 chunks[MAX_CHUNKS];
+};
+
+static void test_next_chunk_len(struct kunit *test)
+{
+ struct mctp_serial devx;
+ struct mctp_serial *dev = &devx;
+ int next;
+
+ const struct test_chunk_tx *params = test->param_value;
+
+ memset(dev, 0x0, sizeof(*dev));
+ memcpy(dev->txbuf, params->input, params->input_len);
+ dev->txlen = params->input_len;
+
+ for (size_t i = 0; i < MAX_CHUNKS; i++) {
+ next = next_chunk_len(dev);
+ dev->txpos += next;
+ KUNIT_EXPECT_EQ(test, next, params->chunks[i]);
+
+ if (next == 0) {
+ KUNIT_EXPECT_EQ(test, dev->txpos, dev->txlen);
+ return;
+ }
+ }
+
+ KUNIT_FAIL_AND_ABORT(test, "Ran out of chunks");
+}
+
+static struct test_chunk_tx chunk_tx_tests[] = {
+ {
+ .input_len = 5,
+ .input = { 0x00, 0x11, 0x22, 0x7e, 0x80 },
+ .chunks = { 3, 1, 1, 0},
+ },
+ {
+ .input_len = 5,
+ .input = { 0x00, 0x11, 0x22, 0x7e, 0x7d },
+ .chunks = { 3, 1, 1, 0},
+ },
+ {
+ .input_len = 3,
+ .input = { 0x7e, 0x11, 0x22, },
+ .chunks = { 1, 2, 0},
+ },
+ {
+ .input_len = 3,
+ .input = { 0x7e, 0x7e, 0x7d, },
+ .chunks = { 1, 1, 1, 0},
+ },
+ {
+ .input_len = 4,
+ .input = { 0x7e, 0x7e, 0x00, 0x7d, },
+ .chunks = { 1, 1, 1, 1, 0},
+ },
+ {
+ .input_len = 6,
+ .input = { 0x7e, 0x7e, 0x00, 0x7d, 0x10, 0x10},
+ .chunks = { 1, 1, 1, 1, 2, 0},
+ },
+ {
+ .input_len = 1,
+ .input = { 0x7e },
+ .chunks = { 1, 0 },
+ },
+ {
+ .input_len = 1,
+ .input = { 0x80 },
+ .chunks = { 1, 0 },
+ },
+ {
+ .input_len = 3,
+ .input = { 0x80, 0x80, 0x00 },
+ .chunks = { 3, 0 },
+ },
+ {
+ .input_len = 7,
+ .input = { 0x01, 0x00, 0x08, 0xc8, 0x00, 0x80, 0x02 },
+ .chunks = { 7, 0 },
+ },
+ {
+ .input_len = 7,
+ .input = { 0x01, 0x00, 0x08, 0xc8, 0x7e, 0x80, 0x02 },
+ .chunks = { 4, 1, 2, 0 },
+ },
+};
+
+KUNIT_ARRAY_PARAM(chunk_tx, chunk_tx_tests, NULL);
+
+static struct kunit_case mctp_serial_test_cases[] = {
+ KUNIT_CASE_PARAM(test_next_chunk_len, chunk_tx_gen_params),
+ {}
+};
+
+static struct kunit_suite mctp_serial_test_suite = {
+ .name = "mctp_serial",
+ .test_cases = mctp_serial_test_cases,
+};
+
+kunit_test_suite(mctp_serial_test_suite);
+
+#endif /* CONFIG_MCTP_SERIAL_TEST */
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index fd02f5cbc853..b156493d7084 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -7,6 +7,7 @@
*/
#include <linux/acpi.h>
+#include <linux/dev_printk.h>
#include <linux/fwnode_mdio.h>
#include <linux/of.h>
#include <linux/phy.h>
@@ -104,7 +105,7 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
return rc;
}
- dev_dbg(&mdio->dev, "registered phy %p fwnode at address %i\n",
+ dev_dbg(&mdio->dev, "registered phy fwnode %pfw at address %i\n",
child, addr);
return 0;
}
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index f40eb50bb978..b7bc70586ee0 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -337,6 +337,7 @@ static const struct of_device_id unimac_mdio_ids[] = {
{ .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
{ .compatible = "brcm,asp-v2.0-mdio", },
+ { .compatible = "brcm,bcm6846-mdio", },
{ .compatible = "brcm,genet-mdio-v5", },
{ .compatible = "brcm,genet-mdio-v4", },
{ .compatible = "brcm,genet-mdio-v3", },
diff --git a/drivers/net/mdio/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c
index de08419d0c98..b70e6d1ad429 100644
--- a/drivers/net/mdio/mdio-mux-mmioreg.c
+++ b/drivers/net/mdio/mdio-mux-mmioreg.c
@@ -96,7 +96,7 @@ static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
{
- struct device_node *np2, *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node;
struct mdio_mux_mmioreg_state *s;
struct resource res;
const __be32 *iprop;
@@ -109,52 +109,42 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
return -ENOMEM;
ret = of_address_to_resource(np, 0, &res);
- if (ret) {
- dev_err(&pdev->dev, "could not obtain memory map for node %pOF\n",
- np);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "could not obtain memory map for node %pOF\n", np);
s->phys = res.start;
s->iosize = resource_size(&res);
if (s->iosize != sizeof(uint8_t) &&
s->iosize != sizeof(uint16_t) &&
- s->iosize != sizeof(uint32_t)) {
- dev_err(&pdev->dev, "only 8/16/32-bit registers are supported\n");
- return -EINVAL;
- }
+ s->iosize != sizeof(uint32_t))
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "only 8/16/32-bit registers are supported\n");
iprop = of_get_property(np, "mux-mask", &len);
- if (!iprop || len != sizeof(uint32_t)) {
- dev_err(&pdev->dev, "missing or invalid mux-mask property\n");
- return -ENODEV;
- }
- if (be32_to_cpup(iprop) >= BIT(s->iosize * 8)) {
- dev_err(&pdev->dev, "only 8/16/32-bit registers are supported\n");
- return -EINVAL;
- }
+ if (!iprop || len != sizeof(uint32_t))
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "missing or invalid mux-mask property\n");
+ if (be32_to_cpup(iprop) >= BIT(s->iosize * 8))
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "only 8/16/32-bit registers are supported\n");
s->mask = be32_to_cpup(iprop);
/*
* Verify that the 'reg' property of each child MDIO bus does not
* set any bits outside of the 'mask'.
*/
- for_each_available_child_of_node(np, np2) {
+ for_each_available_child_of_node_scoped(np, np2) {
u64 reg;
- if (of_property_read_reg(np2, 0, &reg, NULL)) {
- dev_err(&pdev->dev, "mdio-mux child node %pOF is "
- "missing a 'reg' property\n", np2);
- of_node_put(np2);
- return -ENODEV;
- }
- if ((u32)reg & ~s->mask) {
- dev_err(&pdev->dev, "mdio-mux child node %pOF has "
- "a 'reg' value with unmasked bits\n",
- np2);
- of_node_put(np2);
- return -ENODEV;
- }
+ if (of_property_read_reg(np2, 0, &reg, NULL))
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "mdio-mux child node %pOF is missing a 'reg' property\n",
+ np2);
+ if ((u32)reg & ~s->mask)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "mdio-mux child node %pOF has a 'reg' value with unmasked bits\n",
+ np2);
}
ret = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
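
dev_err_probe(), and the dev_err_cast_probe() variant used in ipa_power
above, folds the error message and the return value into one statement and
records the message as the deferral reason when the error is -EPROBE_DEFER.
The scoped child iterator in the same hunk likewise drops the node
reference automatically on every exit path, which is why the explicit
of_node_put() calls disappear. A minimal sketch of the dev_err_probe()
idiom in a hypothetical probe:

#include <linux/clk.h>
#include <linux/dev_printk.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk))
		/* logs (or records, for -EPROBE_DEFER) and returns the errno */
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "error getting core clock\n");

	return 0;
}
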
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 08e607f62e10..2f4fc664d2e1 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -390,7 +390,7 @@ EXPORT_SYMBOL(of_phy_get_and_connect);
bool of_phy_is_fixed_link(struct device_node *np)
{
struct device_node *dn;
- int len, err;
+ int err;
const char *managed;
/* New binding */
@@ -405,8 +405,7 @@ bool of_phy_is_fixed_link(struct device_node *np)
return true;
/* Old binding */
- if (of_get_property(np, "fixed-link", &len) &&
- len == (5 * sizeof(__be32)))
+ if (of_property_count_u32_elems(np, "fixed-link") == 5)
return true;
return false;
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 963d8b4af28d..54c8b9d5b5fc 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -731,10 +731,10 @@ struct failover *net_failover_create(struct net_device *standby_dev)
IFF_TX_SKB_SHARING);
/* don't acquire failover netdev's netif_tx_lock when transmitting */
- failover_dev->features |= NETIF_F_LLTX;
+ failover_dev->lltx = true;
/* Don't allow failover devices to change network namespaces. */
- failover_dev->features |= NETIF_F_NETNS_LOCAL;
+ failover_dev->netns_local = true;
failover_dev->hw_features = FAILOVER_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_TX |
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 9c09293b5258..de20928f7402 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -37,8 +37,9 @@
#include <linux/configfs.h>
#include <linux/etherdevice.h>
#include <linux/utsname.h>
+#include <linux/rtnetlink.h>
-MODULE_AUTHOR("Maintainer: Matt Mackall <mpm@selenic.com>");
+MODULE_AUTHOR("Matt Mackall <mpm@selenic.com>");
MODULE_DESCRIPTION("Console driver for network interfaces");
MODULE_LICENSE("GPL");
@@ -72,9 +73,16 @@ __setup("netconsole=", option_setup);
/* Linked list of all configured targets */
static LIST_HEAD(target_list);
+/* target_cleanup_list is used to track targets that need to be cleaned up
+ * outside of target_list_lock. It should be emptied in the same function
+ * that populates it.
+ */
+static LIST_HEAD(target_cleanup_list);
/* This needs to be a spinlock because write_msg() cannot sleep */
static DEFINE_SPINLOCK(target_list_lock);
+/* This needs to be a mutex because netpoll_cleanup might sleep */
+static DEFINE_MUTEX(target_cleanup_list_lock);
/*
* Console driver for extended netconsoles. Registered on the first use to
@@ -210,6 +218,33 @@ static struct netconsole_target *alloc_and_init(void)
return nt;
}
+/* Clean up every target in the cleanup_list and move the cleaned targets back
+ * to the main target_list.
+ */
+static void netconsole_process_cleanups_core(void)
+{
+ struct netconsole_target *nt, *tmp;
+ unsigned long flags;
+
+ /* The cleanup needs RTNL locked */
+ ASSERT_RTNL();
+
+ mutex_lock(&target_cleanup_list_lock);
+ list_for_each_entry_safe(nt, tmp, &target_cleanup_list, list) {
+ /* all entries in the cleanup_list need to be disabled */
+ WARN_ON_ONCE(nt->enabled);
+ do_netpoll_cleanup(&nt->np);
+ /* Move the cleaned target back to target_list. Both locks need
+ * to be held
+ */
+ spin_lock_irqsave(&target_list_lock, flags);
+ list_move(&nt->list, &target_list);
+ spin_unlock_irqrestore(&target_list_lock, flags);
+ }
+ WARN_ON_ONCE(!list_empty(&target_cleanup_list));
+ mutex_unlock(&target_cleanup_list_lock);
+}
+
#ifdef CONFIG_NETCONSOLE_DYNAMIC
/*
@@ -246,6 +281,19 @@ static struct netconsole_target *to_target(struct config_item *item)
struct netconsole_target, group);
}
+/* Do the list cleanup with the rtnl lock held. The rtnl lock is necessary
+ * because the netdev might be cleaned up by calling __netpoll_cleanup().
+ */
+static void netconsole_process_cleanups(void)
+{
+ /* rtnl_lock() is taken here because it has precedence over the
+ * target_cleanup_list_lock mutex and target_cleanup_list
+ */
+ rtnl_lock();
+ netconsole_process_cleanups_core();
+ rtnl_unlock();
+}
+
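
The shape of the fix: entries are only unlinked and parked on
target_cleanup_list under the spinlock, which cannot sleep, and the
sleeping netpoll teardown happens later with the rtnl mutex held. A
condensed two-phase sketch of the functions above:

/* phase 1: atomic context; just disable and park the entry */
spin_lock_irqsave(&target_list_lock, flags);
nt->enabled = false;
list_move(&nt->list, &target_cleanup_list);
spin_unlock_irqrestore(&target_list_lock, flags);

/* phase 2: sleeping context, rtnl held; safe to tear down netpoll */
rtnl_lock();
list_for_each_entry_safe(nt, tmp, &target_cleanup_list, list) {
	do_netpoll_cleanup(&nt->np);
	list_move(&nt->list, &target_list);
}
rtnl_unlock();
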
/* Get rid of possible trailing newline, returning the new length */
static void trim_newline(char *s, size_t maxlen)
{
@@ -336,14 +384,14 @@ static ssize_t enabled_store(struct config_item *item,
struct netconsole_target *nt = to_target(item);
unsigned long flags;
bool enabled;
- int err;
+ ssize_t ret;
mutex_lock(&dynamic_netconsole_mutex);
- err = kstrtobool(buf, &enabled);
- if (err)
+ ret = kstrtobool(buf, &enabled);
+ if (ret)
goto out_unlock;
- err = -EINVAL;
+ ret = -EINVAL;
if (enabled == nt->enabled) {
pr_info("network logging has already %s\n",
nt->enabled ? "started" : "stopped");
@@ -365,8 +413,8 @@ static ssize_t enabled_store(struct config_item *item,
*/
netpoll_print_options(&nt->np);
- err = netpoll_setup(&nt->np);
- if (err)
+ ret = netpoll_setup(&nt->np);
+ if (ret)
goto out_unlock;
nt->enabled = true;
@@ -376,17 +424,23 @@ static ssize_t enabled_store(struct config_item *item,
* otherwise we might end up in write_msg() with
* nt->np.dev == NULL and nt->enabled == true
*/
+ mutex_lock(&target_cleanup_list_lock);
spin_lock_irqsave(&target_list_lock, flags);
nt->enabled = false;
+ /* Remove the target from the list, while holding
+ * target_list_lock
+ */
+ list_move(&nt->list, &target_cleanup_list);
spin_unlock_irqrestore(&target_list_lock, flags);
- netpoll_cleanup(&nt->np);
+ mutex_unlock(&target_cleanup_list_lock);
}
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
+ /* Deferred cleanup */
+ netconsole_process_cleanups();
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return err;
+ return ret;
}
static ssize_t release_store(struct config_item *item, const char *buf,
@@ -394,27 +448,26 @@ static ssize_t release_store(struct config_item *item, const char *buf,
{
struct netconsole_target *nt = to_target(item);
bool release;
- int err;
+ ssize_t ret;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
config_item_name(&nt->group.cg_item));
- err = -EINVAL;
+ ret = -EINVAL;
goto out_unlock;
}
- err = kstrtobool(buf, &release);
- if (err)
+ ret = kstrtobool(buf, &release);
+ if (ret)
goto out_unlock;
nt->release = release;
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return err;
+ return ret;
}
static ssize_t extended_store(struct config_item *item, const char *buf,
@@ -422,27 +475,25 @@ static ssize_t extended_store(struct config_item *item, const char *buf,
{
struct netconsole_target *nt = to_target(item);
bool extended;
- int err;
+ ssize_t ret;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
config_item_name(&nt->group.cg_item));
- err = -EINVAL;
+ ret = -EINVAL;
goto out_unlock;
}
- err = kstrtobool(buf, &extended);
- if (err)
+ ret = kstrtobool(buf, &extended);
+ if (ret)
goto out_unlock;
nt->extended = extended;
-
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return err;
+ return ret;
}
static ssize_t dev_name_store(struct config_item *item, const char *buf,
@@ -469,7 +520,7 @@ static ssize_t local_port_store(struct config_item *item, const char *buf,
size_t count)
{
struct netconsole_target *nt = to_target(item);
- int rv = -EINVAL;
+ ssize_t ret = -EINVAL;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -478,21 +529,20 @@ static ssize_t local_port_store(struct config_item *item, const char *buf,
goto out_unlock;
}
- rv = kstrtou16(buf, 10, &nt->np.local_port);
- if (rv < 0)
+ ret = kstrtou16(buf, 10, &nt->np.local_port);
+ if (ret < 0)
goto out_unlock;
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return rv;
+ return ret;
}
static ssize_t remote_port_store(struct config_item *item,
const char *buf, size_t count)
{
struct netconsole_target *nt = to_target(item);
- int rv = -EINVAL;
+ ssize_t ret = -EINVAL;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -501,20 +551,20 @@ static ssize_t remote_port_store(struct config_item *item,
goto out_unlock;
}
- rv = kstrtou16(buf, 10, &nt->np.remote_port);
- if (rv < 0)
+ ret = kstrtou16(buf, 10, &nt->np.remote_port);
+ if (ret < 0)
goto out_unlock;
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return rv;
+ return ret;
}
static ssize_t local_ip_store(struct config_item *item, const char *buf,
size_t count)
{
struct netconsole_target *nt = to_target(item);
+ ssize_t ret = -EINVAL;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -541,17 +591,17 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
goto out_unlock;
}
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return -EINVAL;
+ return ret;
}
static ssize_t remote_ip_store(struct config_item *item, const char *buf,
size_t count)
{
struct netconsole_target *nt = to_target(item);
+ ssize_t ret = -EINVAL;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -578,11 +628,10 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
goto out_unlock;
}
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return -EINVAL;
+ return ret;
}
static ssize_t remote_mac_store(struct config_item *item, const char *buf,
@@ -590,6 +639,7 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf,
{
struct netconsole_target *nt = to_target(item);
u8 remote_mac[ETH_ALEN];
+ ssize_t ret = -EINVAL;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -604,11 +654,10 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf,
goto out_unlock;
memcpy(nt->np.remote_mac, remote_mac, ETH_ALEN);
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return -EINVAL;
+ return ret;
}
struct userdatum {
@@ -685,7 +734,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
struct userdatum *udm = to_userdatum(item);
struct netconsole_target *nt;
struct userdata *ud;
- int ret;
+ ssize_t ret;
if (count > MAX_USERDATA_VALUE_LENGTH)
return -EMSGSIZE;
@@ -700,9 +749,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
ud = to_userdata(item->ci_parent);
nt = userdata_to_target(ud);
update_userdata(nt);
-
- mutex_unlock(&dynamic_netconsole_mutex);
- return count;
+ ret = count;
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
return ret;
@@ -778,7 +825,7 @@ static struct configfs_group_operations userdata_ops = {
.drop_item = userdatum_drop,
};
-static struct config_item_type userdata_type = {
+static const struct config_item_type userdata_type = {
.ct_item_ops = &userdatum_ops,
.ct_group_ops = &userdata_ops,
.ct_attrs = userdata_attrs,
@@ -950,7 +997,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
unsigned long flags;
- struct netconsole_target *nt;
+ struct netconsole_target *nt, *tmp;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
bool stopped = false;
@@ -958,9 +1005,9 @@ static int netconsole_netdev_event(struct notifier_block *this,
event == NETDEV_RELEASE || event == NETDEV_JOIN))
goto done;
+ mutex_lock(&target_cleanup_list_lock);
spin_lock_irqsave(&target_list_lock, flags);
-restart:
- list_for_each_entry(nt, &target_list, list) {
+ list_for_each_entry_safe(nt, tmp, &target_list, list) {
netconsole_target_get(nt);
if (nt->np.dev == dev) {
switch (event) {
@@ -970,25 +1017,16 @@ restart:
case NETDEV_RELEASE:
case NETDEV_JOIN:
case NETDEV_UNREGISTER:
- /* rtnl_lock already held
- * we might sleep in __netpoll_cleanup()
- */
nt->enabled = false;
- spin_unlock_irqrestore(&target_list_lock, flags);
-
- __netpoll_cleanup(&nt->np);
-
- spin_lock_irqsave(&target_list_lock, flags);
- netdev_put(nt->np.dev, &nt->np.dev_tracker);
- nt->np.dev = NULL;
+ list_move(&nt->list, &target_cleanup_list);
stopped = true;
- netconsole_target_put(nt);
- goto restart;
}
}
netconsole_target_put(nt);
}
spin_unlock_irqrestore(&target_list_lock, flags);
+ mutex_unlock(&target_cleanup_list_lock);
+
if (stopped) {
const char *msg = "had an event";
@@ -1007,6 +1045,11 @@ restart:
dev->name, msg);
}
+ /* Process target_cleanup_list entries. By the end, target_cleanup_list
+ * should be empty
+ */
+ netconsole_process_cleanups_core();
+
done:
return NOTIFY_DONE;
}
@@ -1118,8 +1161,14 @@ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg,
this_chunk = min(userdata_len - sent_userdata,
MAX_PRINT_CHUNK - preceding_bytes);
- if (WARN_ON_ONCE(this_chunk <= 0))
+ if (WARN_ON_ONCE(this_chunk < 0))
+ /* this_chunk could be zero if the previous
+ * message used the whole buffer. This is not a
+ * problem; userdata will be sent in the next
+ * iteration
+ */
return;
+
memcpy(buf + this_header + this_offset,
userdata + sent_userdata,
this_chunk);
@@ -1215,11 +1264,18 @@ static struct netconsole_target *alloc_param_target(char *target_config,
goto fail;
err = netpoll_setup(&nt->np);
- if (err)
- goto fail;
-
+ if (err) {
+ pr_err("Not enabling netconsole for %s%d. Netpoll setup failed\n",
+ NETCONSOLE_PARAM_TARGET_PREFIX, cmdline_count);
+ if (!IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC))
+ /* only fail if dynamic reconfiguration is not enabled;
+ * otherwise, keep the target in the list, but disabled.
+ */
+ goto fail;
+ } else {
+ nt->enabled = true;
+ }
populate_configfs_item(nt, cmdline_count);
- nt->enabled = true;
return nt;
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 92a7a36b93ac..3e0b61202f0c 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -836,7 +836,8 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
nsim_dev = nsim_trap_data->nsim_dev;
if (!devl_trylock(priv_to_devlink(nsim_dev))) {
- schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1);
+ queue_delayed_work(system_unbound_wq,
+ &nsim_dev->trap_data->trap_report_dw, 1);
return;
}
@@ -848,11 +849,12 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
continue;
nsim_dev_trap_report(nsim_dev_port);
+ cond_resched();
}
devl_unlock(priv_to_devlink(nsim_dev));
-
- schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
- msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
+ queue_delayed_work(system_unbound_wq,
+ &nsim_dev->trap_data->trap_report_dw,
+ msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
}
static int nsim_dev_traps_init(struct devlink *devlink)
@@ -907,8 +909,9 @@ static int nsim_dev_traps_init(struct devlink *devlink)
INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw,
nsim_dev_trap_report_work);
- schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
- msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
+ queue_delayed_work(system_unbound_wq,
+ &nsim_dev->trap_data->trap_report_dw,
+ msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
return 0;
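
Two things change in the trap-report path: the work is queued on
system_unbound_wq, so the potentially long report loop is not pinned to the
CPU that queued it, and cond_resched() yields between ports. The general
shape, with hypothetical names:

#include <linux/sched.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct delayed_work report_dw;
	int nports;
};

static void demo_report_port(struct demo_dev *d, int port)
{
	/* stand-in for the per-port trap reporting */
}

static void demo_report_work(struct work_struct *work)
{
	struct demo_dev *d = container_of(to_delayed_work(work),
					  struct demo_dev, report_dw);
	int i;

	for (i = 0; i < d->nports; i++) {
		demo_report_port(d, i);
		cond_resched();		/* yield between ports */
	}

	/* re-arm on the unbound workqueue rather than a per-CPU one */
	queue_delayed_work(system_unbound_wq, &d->report_dw,
			   msecs_to_jiffies(1000));
}
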
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index a1f91ff8ec56..41e80f78b316 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -1414,7 +1414,6 @@ out:
static const struct file_operations nsim_nexthop_bucket_activity_fops = {
.open = simple_open,
.write = nsim_nexthop_bucket_activity_write,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index 16789cd446e9..059269557d92 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -65,6 +65,7 @@ static struct netkit *netkit_priv(const struct net_device *dev)
static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
struct netkit *nk = netkit_priv(dev);
enum netkit_action ret = READ_ONCE(nk->policy);
netdev_tx_t ret_dev = NET_XMIT_SUCCESS;
@@ -72,6 +73,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device *peer;
int len = skb->len;
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
rcu_read_lock();
peer = rcu_dereference(nk->peer);
if (unlikely(!peer || !(peer->flags & IFF_UP) ||
@@ -110,6 +112,7 @@ drop_stats:
break;
}
rcu_read_unlock();
+ bpf_net_ctx_clear(bpf_net_ctx);
return ret_dev;
}
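
The transmit path now brackets its BPF program invocations with an on-stack
bpf_net_context; the assumption here is that this supplies the
per-invocation scratch state (such as redirect info) that the programs may
use. The bracketing shape, with hypothetical names:

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
	netdev_tx_t ret = NET_XMIT_SUCCESS;

	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
	rcu_read_lock();
	/* ... run BPF programs and transmit skb ... */
	rcu_read_unlock();
	bpf_net_ctx_clear(bpf_net_ctx);

	return ret;
}
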
@@ -255,11 +258,13 @@ static void netkit_setup(struct net_device *dev)
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->priv_flags |= IFF_PHONY_HEADROOM;
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_DISABLE_NETPOLL;
+ dev->lltx = true;
dev->ethtool_ops = &netkit_ethtool_ops;
dev->netdev_ops = &netkit_netdev_ops;
- dev->features |= netkit_features | NETIF_F_LLTX;
+ dev->features |= netkit_features;
dev->hw_features = netkit_features;
dev->hw_enc_features = netkit_features;
dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index e5a0987a263e..8bfd4ee5a8c4 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -63,13 +63,13 @@ static void nlmon_setup(struct net_device *dev)
{
dev->type = ARPHRD_NETLINK;
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->lltx = true;
dev->netdev_ops = &nlmon_ops;
dev->ethtool_ops = &nlmon_ethtool_ops;
dev->needs_free_netdev = true;
- dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
- NETIF_F_HIGHDMA | NETIF_F_LLTX;
+ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
dev->flags = IFF_NOARP;
dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
diff --git a/drivers/net/pcs/pcs-xpcs-wx.c b/drivers/net/pcs/pcs-xpcs-wx.c
index 19c75886f070..5f5cd3596cb8 100644
--- a/drivers/net/pcs/pcs-xpcs-wx.c
+++ b/drivers/net/pcs/pcs-xpcs-wx.c
@@ -109,7 +109,7 @@ static void txgbe_pma_config_1g(struct dw_xpcs *xpcs)
txgbe_write_pma(xpcs, TXGBE_DFE_TAP_CTL0, 0);
val = txgbe_read_pma(xpcs, TXGBE_RX_GEN_CTL3);
val = u16_replace_bits(val, 0x4, TXGBE_RX_GEN_CTL3_LOS_TRSHLD0);
- txgbe_write_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL, val);
+ txgbe_write_pma(xpcs, TXGBE_RX_GEN_CTL3, val);
txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL0, 0x20);
txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL3, 0x46);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 7fddc8306d82..01b235b3bb7e 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -44,6 +44,9 @@ config LED_TRIGGER_PHY
<Speed in megabits>Mbps OR <Speed in gigabits>Gbps OR link
for any speed known to the PHY.
+config OPEN_ALLIANCE_HELPERS
+ bool
+
config PHYLIB_LEDS
def_bool OF
depends on LEDS_CLASS=y || LEDS_CLASS=PHYLIB
@@ -109,6 +112,13 @@ config ADIN1100_PHY
Currently supports the:
- ADIN1100 - Robust,Industrial, Low Power 10BASE-T1L Ethernet PHY
+config AMCC_QT2025_PHY
+ tristate "AMCC QT2025 PHY"
+ depends on RUST_PHYLIB_ABSTRACTIONS
+ depends on RUST_FW_LOADER_ABSTRACTIONS
+ help
+ Adds support for the Applied Micro Circuits Corporation QT2025 PHY.
+
source "drivers/net/phy/aquantia/Kconfig"
config AX88796B_PHY
@@ -414,6 +424,7 @@ config DP83TD510_PHY
config DP83TG720_PHY
tristate "Texas Instruments DP83TG720 Ethernet 1000Base-T1 PHY"
+ select OPEN_ALLIANCE_HELPERS
help
The DP83TG720S-Q1 is an automotive Ethernet physical layer
transceiver compliant with IEEE 802.3bp and Open Alliance
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 202ed7f450da..90f886844381 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -2,7 +2,7 @@
# Makefile for Linux PHY drivers
libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \
- linkmode.o
+ linkmode.o phy_link_topology.o
mdio-bus-y += mdio_bus.o mdio_device.o
ifdef CONFIG_MDIO_DEVICE
@@ -22,6 +22,7 @@ endif
obj-$(CONFIG_MDIO_DEVRES) += mdio_devres.o
libphy-$(CONFIG_SWPHY) += swphy.o
libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o
+libphy-$(CONFIG_OPEN_ALLIANCE_HELPERS) += open_alliance_helpers.o
obj-$(CONFIG_PHYLINK) += phylink.o
obj-$(CONFIG_PHYLIB) += libphy.o
@@ -36,6 +37,7 @@ obj-$(CONFIG_ADIN_PHY) += adin.o
obj-$(CONFIG_ADIN1100_PHY) += adin1100.o
obj-$(CONFIG_AIR_EN8811H_PHY) += air_en8811h.o
obj-$(CONFIG_AMD_PHY) += amd.o
+obj-$(CONFIG_AMCC_QT2025_PHY) += qt2025.o
obj-$(CONFIG_AQUANTIA_PHY) += aquantia/
ifdef CONFIG_AX88796B_RUST_PHY
obj-$(CONFIG_AX88796B_PHY) += ax88796b_rust.o
diff --git a/drivers/net/phy/air_en8811h.c b/drivers/net/phy/air_en8811h.c
index 3cdc8c6b30b6..8d076b9609fd 100644
--- a/drivers/net/phy/air_en8811h.c
+++ b/drivers/net/phy/air_en8811h.c
@@ -15,7 +15,7 @@
#include <linux/firmware.h>
#include <linux/property.h>
#include <linux/wordpart.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#define EN8811H_PHY_ID 0x03a2a411
diff --git a/drivers/net/phy/aquantia/aquantia_firmware.c b/drivers/net/phy/aquantia/aquantia_firmware.c
index 524627a36c6f..dab3af80593f 100644
--- a/drivers/net/phy/aquantia/aquantia_firmware.c
+++ b/drivers/net/phy/aquantia/aquantia_firmware.c
@@ -6,7 +6,7 @@
#include <linux/crc-itu-t.h>
#include <linux/nvmem-consumer.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "aquantia.h"
@@ -353,26 +353,32 @@ int aqr_firmware_load(struct phy_device *phydev)
{
int ret;
- ret = aqr_wait_reset_complete(phydev);
- if (ret)
- return ret;
-
- /* Check if the firmware is not already loaded by pooling
- * the current version returned by the PHY. If 0 is returned,
- * no firmware is loaded.
+ /* Check if the firmware is not already loaded by polling
+ * the current version returned by the PHY.
*/
- ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
- if (ret > 0)
- goto exit;
-
- ret = aqr_firmware_load_nvmem(phydev);
- if (!ret)
- goto exit;
-
- ret = aqr_firmware_load_fs(phydev);
- if (ret)
+ ret = aqr_wait_reset_complete(phydev);
+ switch (ret) {
+ case 0:
+ /* Some firmware is loaded => do nothing */
+ return 0;
+ case -ETIMEDOUT:
+ /* VEND1_GLOBAL_FW_ID still reads 0 after 2 seconds of polling.
+ * We don't have full confidence that no firmware is loaded (in
+ * theory it might just not have loaded yet), but we will
+ * assume that, and load a new image.
+ */
+ ret = aqr_firmware_load_nvmem(phydev);
+ if (!ret)
+ return ret;
+
+ ret = aqr_firmware_load_fs(phydev);
+ if (ret)
+ return ret;
+ break;
+ default:
+ /* PHY read error, propagate it to the caller */
return ret;
+ }
-exit:
return 0;
}
diff --git a/drivers/net/phy/aquantia/aquantia_leds.c b/drivers/net/phy/aquantia/aquantia_leds.c
index 0516ac02c3f8..201c8df93fad 100644
--- a/drivers/net/phy/aquantia/aquantia_leds.c
+++ b/drivers/net/phy/aquantia/aquantia_leds.c
@@ -120,7 +120,8 @@ int aqr_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
int aqr_phy_led_active_low_set(struct phy_device *phydev, int index, bool enable)
{
return phy_modify_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_DRIVE(index),
- VEND1_GLOBAL_LED_DRIVE_VDD, enable);
+ VEND1_GLOBAL_LED_DRIVE_VDD,
+ enable ? VEND1_GLOBAL_LED_DRIVE_VDD : 0);
}
int aqr_phy_led_polarity_set(struct phy_device *phydev, int index, unsigned long modes)
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index e982e9ce44a5..c33a5ef34ba0 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -435,6 +435,9 @@ static int aqr107_set_tunable(struct phy_device *phydev,
}
}
+#define AQR_FW_WAIT_SLEEP_US 20000
+#define AQR_FW_WAIT_TIMEOUT_US 2000000
+
/* If we configure settings whilst firmware is still initializing the chip,
* then these settings may be overwritten. Therefore make sure chip
* initialization has completed. Use presence of the firmware ID as
@@ -444,11 +447,19 @@ static int aqr107_set_tunable(struct phy_device *phydev,
*/
int aqr_wait_reset_complete(struct phy_device *phydev)
{
- int val;
+ int ret, val;
+
+ ret = read_poll_timeout(phy_read_mmd, val, val != 0,
+ AQR_FW_WAIT_SLEEP_US, AQR_FW_WAIT_TIMEOUT_US,
+ false, phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_FW_ID);
+ if (val < 0) {
+ phydev_err(phydev, "Failed to read VEND1_GLOBAL_FW_ID: %pe\n",
+ ERR_PTR(val));
+ return val;
+ }
- return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_FW_ID, val, val != 0,
- 20000, 2000000, false);
+ return ret;
}
static void aqr107_chip_info(struct phy_device *phydev)
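
read_poll_timeout() retries the read every AQR_FW_WAIT_SLEEP_US (20 ms) for
up to AQR_FW_WAIT_TIMEOUT_US (2 s) until the firmware ID reads non-zero.
Its return value only distinguishes success from timeout, which is why the
code above also checks val for a negative errno from the MDIO read itself.
The macro's semantics, sketched as used here:

/* read_poll_timeout(op, val, cond, sleep_us, timeout_us,
 *                   sleep_before_read, args...):
 * evaluates val = op(args...) every sleep_us until cond holds or
 * timeout_us elapses; returns 0 on success, -ETIMEDOUT on timeout.
 */
ret = read_poll_timeout(phy_read_mmd, val, val != 0,
			AQR_FW_WAIT_SLEEP_US, AQR_FW_WAIT_TIMEOUT_US,
			false, phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
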
@@ -478,7 +489,7 @@ static int aqr107_config_init(struct phy_device *phydev)
{
struct aqr107_priv *priv = phydev->priv;
u32 led_active_low;
- int ret, index = 0;
+ int ret;
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
@@ -505,10 +516,9 @@ static int aqr107_config_init(struct phy_device *phydev)
/* Restore LED polarity state after reset */
for_each_set_bit(led_active_low, &priv->leds_active_low, AQR_MAX_LEDS) {
- ret = aqr_phy_led_active_low_set(phydev, index, led_active_low);
+ ret = aqr_phy_led_active_low_set(phydev, led_active_low, true);
if (ret)
return ret;
- index++;
}
return 0;
@@ -527,12 +537,6 @@ static int aqcs109_config_init(struct phy_device *phydev)
if (!ret)
aqr107_chip_info(phydev);
- /* AQCS109 belongs to a chip family partially supporting 10G and 5G.
- * PMA speed ability bits are the same for all members of the family,
- * AQCS109 however supports speeds up to 2.5G only.
- */
- phy_set_max_speed(phydev, SPEED_2500);
-
return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
}
@@ -721,6 +725,31 @@ static int aqr113c_fill_interface_modes(struct phy_device *phydev)
return aqr107_fill_interface_modes(phydev);
}
+static int aqr115c_get_features(struct phy_device *phydev)
+{
+ unsigned long *supported = phydev->supported;
+
+ /* PHY supports speeds up to 2.5G with autoneg. PMA capabilities
+ * are not useful.
+ */
+ linkmode_or(supported, supported, phy_gbit_features);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, supported);
+
+ return 0;
+}
+
+static int aqr111_get_features(struct phy_device *phydev)
+{
+ /* PHY supports speeds up to 5G with autoneg. PMA capabilities
+ * are not useful.
+ */
+ aqr115c_get_features(phydev);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->supported);
+
+ return 0;
+}
+
static int aqr113c_config_init(struct phy_device *phydev)
{
int ret;
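
aqr111_get_features() layers on top of the 2.5G helper instead of duplicating the bit list. A toy model of that composition using a single 32-bit word (the kernel uses linkmode bitmaps, not a u32; the bit values here are invented):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical single-word link-mode bits. */
#define MODE_GBIT_FEATURES 0x00ff	/* up to 1G, stand-in for phy_gbit_features */
#define MODE_2500_FULL     (1u << 8)
#define MODE_5000_FULL     (1u << 9)

static void get_features_2g5(uint32_t *supported)
{
	*supported |= MODE_GBIT_FEATURES | MODE_2500_FULL;
}

static void get_features_5g(uint32_t *supported)
{
	get_features_2g5(supported);	/* reuse the 2.5G base */
	*supported |= MODE_5000_FULL;
}

int main(void)
{
	uint32_t supported = 0;

	get_features_5g(&supported);
	printf("supported = 0x%04x\n", supported);
	return 0;
}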
@@ -757,15 +786,6 @@ static int aqr107_probe(struct phy_device *phydev)
return aqr_hwmon_probe(phydev);
}
-static int aqr111_config_init(struct phy_device *phydev)
-{
- /* AQR111 reports supporting speed up to 10G,
- * however only speeds up to 5G are supported.
- */
- phy_set_max_speed(phydev, SPEED_5000);
-
- return aqr107_config_init(phydev);
-}
static struct phy_driver aqr_driver[] = {
{
@@ -843,6 +863,7 @@ static struct phy_driver aqr_driver[] = {
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
+ .get_features = aqr115c_get_features,
.link_change_notify = aqr107_link_change_notify,
.led_brightness_set = aqr_phy_led_brightness_set,
.led_hw_is_supported = aqr_phy_led_hw_is_supported,
@@ -855,7 +876,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQR111",
.probe = aqr107_probe,
.get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr111_config_init,
+ .config_init = aqr107_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
@@ -867,6 +888,7 @@ static struct phy_driver aqr_driver[] = {
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
+ .get_features = aqr111_get_features,
.link_change_notify = aqr107_link_change_notify,
.led_brightness_set = aqr_phy_led_brightness_set,
.led_hw_is_supported = aqr_phy_led_hw_is_supported,
@@ -879,7 +901,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQR111B0",
.probe = aqr107_probe,
.get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr111_config_init,
+ .config_init = aqr107_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
@@ -891,6 +913,7 @@ static struct phy_driver aqr_driver[] = {
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
+ .get_features = aqr111_get_features,
.link_change_notify = aqr107_link_change_notify,
.led_brightness_set = aqr_phy_led_brightness_set,
.led_hw_is_supported = aqr_phy_led_hw_is_supported,
@@ -1000,7 +1023,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQR114C",
.probe = aqr107_probe,
.get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr111_config_init,
+ .config_init = aqr107_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
@@ -1012,6 +1035,7 @@ static struct phy_driver aqr_driver[] = {
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
+ .get_features = aqr111_get_features,
.link_change_notify = aqr107_link_change_notify,
.led_brightness_set = aqr_phy_led_brightness_set,
.led_hw_is_supported = aqr_phy_led_hw_is_supported,
@@ -1036,6 +1060,7 @@ static struct phy_driver aqr_driver[] = {
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
+ .get_features = aqr115c_get_features,
.link_change_notify = aqr107_link_change_notify,
.led_brightness_set = aqr_phy_led_brightness_set,
.led_hw_is_supported = aqr_phy_led_hw_is_supported,
diff --git a/drivers/net/phy/ax88796b_rust.rs b/drivers/net/phy/ax88796b_rust.rs
index 5c92572962dc..8c7eb009d9fc 100644
--- a/drivers/net/phy/ax88796b_rust.rs
+++ b/drivers/net/phy/ax88796b_rust.rs
@@ -6,7 +6,7 @@
//! C version of this driver: [`drivers/net/phy/ax88796b.c`](./ax88796b.c)
use kernel::{
c_str,
- net::phy::{self, DeviceId, Driver},
+ net::phy::{self, reg::C22, DeviceId, Driver},
prelude::*,
uapi,
};
@@ -24,7 +24,6 @@ kernel::module_phy_driver! {
license: "GPL",
}
-const MII_BMCR: u16 = uapi::MII_BMCR as u16;
const BMCR_SPEED100: u16 = uapi::BMCR_SPEED100 as u16;
const BMCR_FULLDPLX: u16 = uapi::BMCR_FULLDPLX as u16;
@@ -33,7 +32,7 @@ const BMCR_FULLDPLX: u16 = uapi::BMCR_FULLDPLX as u16;
// Toggle BMCR_RESET bit off to accommodate broken AX8796B PHY implementation
// such as used on the Individual Computers' X-Surf 100 Zorro card.
fn asix_soft_reset(dev: &mut phy::Device) -> Result {
- dev.write(uapi::MII_BMCR as u16, 0)?;
+ dev.write(C22::BMCR, 0)?;
dev.genphy_soft_reset()
}
@@ -55,7 +54,7 @@ impl Driver for PhyAX88772A {
}
// If MII_LPA is 0, phy_resolve_aneg_linkmode() will fail to resolve
// linkmode so use MII_BMCR as default values.
- let ret = dev.read(MII_BMCR)?;
+ let ret = dev.read(C22::BMCR)?;
if ret & BMCR_SPEED100 != 0 {
dev.set_speed(uapi::SPEED_100);
diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c
index 874a1b64b115..208e8f561e06 100644
--- a/drivers/net/phy/bcm-phy-ptp.c
+++ b/drivers/net/phy/bcm-phy-ptp.c
@@ -4,7 +4,7 @@
* Copyright (C) 2022 Jonathan Lemon <jonathan.lemon@gmail.com>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/ptp_classify.h>
diff --git a/drivers/net/phy/bcm84881.c b/drivers/net/phy/bcm84881.c
index f1d47c264058..97da3aee4942 100644
--- a/drivers/net/phy/bcm84881.c
+++ b/drivers/net/phy/bcm84881.c
@@ -132,7 +132,7 @@ static int bcm84881_aneg_done(struct phy_device *phydev)
bmsr = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_C22 + MII_BMSR);
if (bmsr < 0)
- return val;
+ return bmsr;
return !!(val & MDIO_AN_STAT1_COMPLETE) &&
!!(bmsr & BMSR_ANEGCOMPLETE);
@@ -158,7 +158,7 @@ static int bcm84881_read_status(struct phy_device *phydev)
bmsr = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_C22 + MII_BMSR);
if (bmsr < 0)
- return val;
+ return bmsr;
phydev->autoneg_complete = !!(val & MDIO_AN_STAT1_COMPLETE) &&
!!(bmsr & BMSR_ANEGCOMPLETE);
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index efeb643c1373..fc247f479257 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -271,8 +271,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
- /* Private data pointer is NULL on DP83825 */
- if (!dp83822 || !dp83822->fx_enabled)
+ if (!dp83822->fx_enabled)
misr_status |= DP83822_ANEG_COMPLETE_INT_EN |
DP83822_DUP_MODE_CHANGE_INT_EN |
DP83822_SPEED_CHANGED_INT_EN;
@@ -292,8 +291,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_PAGE_RX_INT_EN |
DP83822_EEE_ERROR_CHANGE_INT_EN);
- /* Private data pointer is NULL on DP83825 */
- if (!dp83822 || !dp83822->fx_enabled)
+ if (!dp83822->fx_enabled)
misr_status |= DP83822_ANEG_ERR_INT_EN |
DP83822_WOL_PKT_INT_EN;
@@ -691,10 +689,9 @@ static int dp83822_read_straps(struct phy_device *phydev)
return 0;
}
-static int dp83822_probe(struct phy_device *phydev)
+static int dp8382x_probe(struct phy_device *phydev)
{
struct dp83822_private *dp83822;
- int ret;
dp83822 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83822),
GFP_KERNEL);
@@ -703,6 +700,20 @@ static int dp83822_probe(struct phy_device *phydev)
phydev->priv = dp83822;
+ return 0;
+}
+
+static int dp83822_probe(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822;
+ int ret;
+
+ ret = dp8382x_probe(phydev);
+ if (ret)
+ return ret;
+
+ dp83822 = phydev->priv;
+
ret = dp83822_read_straps(phydev);
if (ret)
return ret;
@@ -717,14 +728,11 @@ static int dp83822_probe(struct phy_device *phydev)
static int dp83826_probe(struct phy_device *phydev)
{
- struct dp83822_private *dp83822;
-
- dp83822 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83822),
- GFP_KERNEL);
- if (!dp83822)
- return -ENOMEM;
+ int ret;
- phydev->priv = dp83822;
+ ret = dp8382x_probe(phydev);
+ if (ret)
+ return ret;
dp83826_of_init(phydev);
@@ -795,6 +803,7 @@ static int dp83822_resume(struct phy_device *phydev)
PHY_ID_MATCH_MODEL(_id), \
.name = (_name), \
/* PHY_BASIC_FEATURES */ \
+ .probe = dp8382x_probe, \
.soft_reset = dp83822_phy_reset, \
.config_init = dp8382x_config_init, \
.get_wol = dp83822_get_wol, \
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index d7aaefb5226b..5f056d7db83e 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -645,7 +645,6 @@ static int dp83869_configure_fiber(struct phy_device *phydev,
phydev->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
- linkmode_set_bit(ADVERTISED_FIBRE, phydev->advertising);
if (dp83869->mode == DP83869_RGMII_1000_BASE) {
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c
index 551e37786c2d..92aa3a2b9744 100644
--- a/drivers/net/phy/dp83td510.c
+++ b/drivers/net/phy/dp83td510.c
@@ -58,6 +58,10 @@ static const u16 dp83td510_mse_sqi_map[] = {
0x0000 /* 24dB =< SNR */
};
+struct dp83td510_priv {
+ bool alcd_test_active;
+};
+
/* Time Domain Reflectometry (TDR) Functionality of DP83TD510 PHY
*
* I assume that this PHY is using a variation of Spread Spectrum Time Domain
@@ -169,6 +173,10 @@ static const u16 dp83td510_mse_sqi_map[] = {
#define DP83TD510E_UNKN_030E 0x30e
#define DP83TD510E_030E_VAL 0x2520
+#define DP83TD510E_ALCD_STAT 0xa9f
+#define DP83TD510E_ALCD_COMPLETE BIT(15)
+#define DP83TD510E_ALCD_CABLE_LENGTH GENMASK(10, 0)
+
static int dp83td510_config_intr(struct phy_device *phydev)
{
int ret;
@@ -325,8 +333,23 @@ static int dp83td510_get_sqi_max(struct phy_device *phydev)
*/
static int dp83td510_cable_test_start(struct phy_device *phydev)
{
+ struct dp83td510_priv *priv = phydev->priv;
int ret;
+ /* If link partner is active, we won't be able to use TDR, since
+ * we can't force link partner to be silent. The autonegotiation
+ * pulses will be too frequent and the TDR sequence will be
+ * too long. So, TDR will always fail. Since the link is established
+ * we already know that the cable is working, so we can get some
+	 * extra information, like the cable length, using ALCD.
+ */
+ if (phydev->link) {
+ priv->alcd_test_active = true;
+ return 0;
+ }
+
+ priv->alcd_test_active = false;
+
ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_CTRL,
DP83TD510E_CTRL_HW_RESET);
if (ret)
@@ -402,8 +425,8 @@ static int dp83td510_cable_test_start(struct phy_device *phydev)
}
/**
- * dp83td510_cable_test_get_status - Get the status of the cable test for the
- * DP83TD510 PHY.
+ * dp83td510_cable_test_get_tdr_status - Get the status of the TDR test for the
+ * DP83TD510 PHY.
* @phydev: Pointer to the phy_device structure.
* @finished: Pointer to a boolean that indicates whether the test is finished.
*
@@ -411,13 +434,11 @@ static int dp83td510_cable_test_start(struct phy_device *phydev)
*
* Returns: 0 on success or a negative error code on failure.
*/
-static int dp83td510_cable_test_get_status(struct phy_device *phydev,
- bool *finished)
+static int dp83td510_cable_test_get_tdr_status(struct phy_device *phydev,
+ bool *finished)
{
int ret, stat;
- *finished = false;
-
ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG);
if (ret < 0)
return ret;
@@ -459,6 +480,77 @@ static int dp83td510_cable_test_get_status(struct phy_device *phydev,
return phy_init_hw(phydev);
}
+/**
+ * dp83td510_cable_test_get_alcd_status - Get the status of the ALCD test for the
+ * DP83TD510 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ * @finished: Pointer to a boolean that indicates whether the test is finished.
+ *
+ * The function sets the @finished flag to true if the test is complete.
+ * The function reads the cable length and reports it to the user.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83td510_cable_test_get_alcd_status(struct phy_device *phydev,
+ bool *finished)
+{
+ unsigned int location;
+ int ret, phy_sts;
+
+ phy_sts = phy_read(phydev, DP83TD510E_PHY_STS);
+
+ if (!(phy_sts & DP83TD510E_LINK_STATUS)) {
+	/* If the link is down, we can't do anything useful now */
+ ethnl_cable_test_result_with_src(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC,
+ ETHTOOL_A_CABLE_INF_SRC_ALCD);
+ *finished = true;
+ return 0;
+ }
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_ALCD_STAT);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & DP83TD510E_ALCD_COMPLETE))
+ return 0;
+
+ location = FIELD_GET(DP83TD510E_ALCD_CABLE_LENGTH, ret) * 100;
+
+ ethnl_cable_test_fault_length_with_src(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ location,
+ ETHTOOL_A_CABLE_INF_SRC_ALCD);
+
+ ethnl_cable_test_result_with_src(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_OK,
+ ETHTOOL_A_CABLE_INF_SRC_ALCD);
+ *finished = true;
+
+ return 0;
+}
+
+/**
+ * dp83td510_cable_test_get_status - Get the status of the cable test for the
+ * DP83TD510 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ * @finished: Pointer to a boolean that indicates whether the test is finished.
+ *
+ * The function sets the @finished flag to true if the test is complete.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83td510_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ struct dp83td510_priv *priv = phydev->priv;
+ *finished = false;
+
+ if (priv->alcd_test_active)
+ return dp83td510_cable_test_get_alcd_status(phydev, finished);
+
+ return dp83td510_cable_test_get_tdr_status(phydev, finished);
+}
+
static int dp83td510_get_features(struct phy_device *phydev)
{
/* This PHY can't respond on MDIO bus if no RMII clock is enabled.
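
The start/status pair above forms a small state machine: cable_test_start() records which method was armed, and the status callback routes to the matching poller. A standalone sketch of that dispatch pattern (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct priv {
	bool alcd_test_active;
};

static int get_alcd_status(bool *finished) { *finished = true; return 0; }
static int get_tdr_status(bool *finished)  { *finished = true; return 0; }

static int cable_test_start(struct priv *p, bool link_up)
{
	/* Link up: TDR would fail against an active partner, use ALCD. */
	p->alcd_test_active = link_up;
	return 0;
}

static int cable_test_get_status(struct priv *p, bool *finished)
{
	*finished = false;

	if (p->alcd_test_active)
		return get_alcd_status(finished);

	return get_tdr_status(finished);
}

int main(void)
{
	struct priv p;
	bool done;

	cable_test_start(&p, true);
	cable_test_get_status(&p, &done);
	printf("done = %d via %s\n", done, p.alcd_test_active ? "ALCD" : "TDR");
	return 0;
}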
@@ -477,12 +569,27 @@ static int dp83td510_get_features(struct phy_device *phydev)
return 0;
}
+static int dp83td510_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct dp83td510_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
static struct phy_driver dp83td510_driver[] = {
{
PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID),
.name = "TI DP83TD510E",
.flags = PHY_POLL_CABLE_TEST,
+ .probe = dp83td510_probe,
.config_aneg = dp83td510_config_aneg,
.read_status = dp83td510_read_status,
.get_features = dp83td510_get_features,
diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c
index c706429b225a..0ef4d7dba065 100644
--- a/drivers/net/phy/dp83tg720.c
+++ b/drivers/net/phy/dp83tg720.c
@@ -3,10 +3,13 @@
* Copyright (c) 2023 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
*/
#include <linux/bitfield.h>
+#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
+#include "open_alliance_helpers.h"
+
#define DP83TG720S_PHY_ID 0x2000a284
/* MDIO_MMD_VEND2 registers */
@@ -14,6 +17,17 @@
#define DP83TG720S_STS_MII_INT BIT(7)
#define DP83TG720S_LINK_STATUS BIT(0)
+/* TDR Configuration Register (0x1E) */
+#define DP83TG720S_TDR_CFG 0x1e
+/* 1b = TDR start, 0b = No TDR */
+#define DP83TG720S_TDR_START BIT(15)
+/* 1b = TDR auto on link down, 0b = Manual TDR start */
+#define DP83TG720S_CFG_TDR_AUTO_RUN BIT(14)
+/* 1b = TDR done, 0b = TDR in progress */
+#define DP83TG720S_TDR_DONE BIT(1)
+/* 1b = TDR fail, 0b = TDR success */
+#define DP83TG720S_TDR_FAIL BIT(0)
+
#define DP83TG720S_PHY_RESET 0x1f
#define DP83TG720S_HW_RESET BIT(15)
@@ -22,18 +36,155 @@
/* Power Mode 0 is Normal mode */
#define DP83TG720S_LPS_CFG3_PWR_MODE_0 BIT(0)
+/* Open Alliance 1000BaseT1 compatible HDD.TDR Fault Status Register */
+#define DP83TG720S_TDR_FAULT_STATUS 0x30f
+
+/* Register 0x0301: TDR Configuration 2 */
+#define DP83TG720S_TDR_CFG2 0x301
+
+/* Register 0x0303: TDR Configuration 3 */
+#define DP83TG720S_TDR_CFG3 0x303
+
+/* Register 0x0304: TDR Configuration 4 */
+#define DP83TG720S_TDR_CFG4 0x304
+
+/* Register 0x0405: Unknown Register */
+#define DP83TG720S_UNKNOWN_0405 0x405
+
+/* Register 0x0576: TDR Master Link Down Control */
+#define DP83TG720S_TDR_MASTER_LINK_DOWN 0x576
+
#define DP83TG720S_RGMII_DELAY_CTRL 0x602
/* In RGMII mode, Enable or disable the internal delay for RXD */
#define DP83TG720S_RGMII_RX_CLK_SEL BIT(1)
/* In RGMII mode, Enable or disable the internal delay for TXD */
#define DP83TG720S_RGMII_TX_CLK_SEL BIT(0)
+/* Register 0x083F: Unknown Register */
+#define DP83TG720S_UNKNOWN_083F 0x83f
+
#define DP83TG720S_SQI_REG_1 0x871
#define DP83TG720S_SQI_OUT_WORST GENMASK(7, 5)
#define DP83TG720S_SQI_OUT GENMASK(3, 1)
#define DP83TG720_SQI_MAX 7
+/**
+ * dp83tg720_cable_test_start - Start the cable test for the DP83TG720 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * This sequence is based on the documented procedure for the DP83TG720 PHY.
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ */
+static int dp83tg720_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Initialize the PHY to run the TDR test as described in the
+ * "DP83TG720S-Q1: Configuring for Open Alliance Specification
+ * Compliance (Rev. B)" application note.
+	 * Most of the registers are not documented. Some of the register names
+ * are guessed by comparing the register offsets with the DP83TD510E.
+ */
+
+ /* Force master link down */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ DP83TG720S_TDR_MASTER_LINK_DOWN, 0x0400);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG2,
+ 0xa008);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG3,
+ 0x0928);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG4,
+ 0x0004);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_UNKNOWN_0405,
+ 0x6400);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_UNKNOWN_083F,
+ 0x3003);
+ if (ret)
+ return ret;
+
+ /* Start the TDR */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG,
+ DP83TG720S_TDR_START);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * dp83tg720_cable_test_get_status - Get the status of the cable test for the
+ * DP83TG720 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ * @finished: Pointer to a boolean that indicates whether the test is finished.
+ *
+ * The function sets the @finished flag to true if the test is complete.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83tg720_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int ret, stat;
+
+ *finished = false;
+
+ /* Read the TDR status */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG);
+ if (ret < 0)
+ return ret;
+
+ /* Check if the TDR test is done */
+ if (!(ret & DP83TG720S_TDR_DONE))
+ return 0;
+
+ /* Check for TDR test failure */
+ if (!(ret & DP83TG720S_TDR_FAIL)) {
+ int location;
+
+ /* Read fault status */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ DP83TG720S_TDR_FAULT_STATUS);
+ if (ret < 0)
+ return ret;
+
+ /* Get fault type */
+ stat = oa_1000bt1_get_ethtool_cable_result_code(ret);
+
+ /* Determine fault location */
+ location = oa_1000bt1_get_tdr_distance(ret);
+ if (location > 0)
+ ethnl_cable_test_fault_length(phydev,
+ ETHTOOL_A_CABLE_PAIR_A,
+ location);
+ } else {
+ /* Active link partner or other issues */
+ stat = ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+
+ *finished = true;
+
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, stat);
+
+ return phy_init_hw(phydev);
+}
+
static int dp83tg720_config_aneg(struct phy_device *phydev)
{
int ret;
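
Status polling above hinges on two bits with the meanings given in the defines: DONE gates everything, and FAIL then distinguishes a characterized fault from a failed measurement (for instance an active link partner). A compact decode sketch using the same bit positions:

#include <stdio.h>

#define TDR_DONE (1 << 1)
#define TDR_FAIL (1 << 0)

/* Returns: -1 still running, 0 measurement ok, 1 measurement failed. */
static int decode_tdr(int status)
{
	if (!(status & TDR_DONE))
		return -1;		/* keep polling */

	return (status & TDR_FAIL) ? 1 : 0;
}

int main(void)
{
	printf("%d %d %d\n", decode_tdr(0x0), decode_tdr(TDR_DONE),
	       decode_tdr(TDR_DONE | TDR_FAIL));
	return 0;
}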
@@ -195,12 +346,15 @@ static struct phy_driver dp83tg720_driver[] = {
PHY_ID_MATCH_MODEL(DP83TG720S_PHY_ID),
.name = "TI DP83TG720S",
+ .flags = PHY_POLL_CABLE_TEST,
.config_aneg = dp83tg720_config_aneg,
.read_status = dp83tg720_read_status,
.get_features = genphy_c45_pma_read_ext_abilities,
.config_init = dp83tg720_config_init,
.get_sqi = dp83tg720_get_sqi,
.get_sqi_max = dp83tg720_get_sqi_max,
+ .cable_test_start = dp83tg720_cable_test_start,
+ .cable_test_get_status = dp83tg720_cable_test_get_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index b88398e6872b..0b777cdd7078 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -553,6 +553,8 @@ static const struct sfp_upstream_ops sfp_phy_ops = {
.link_down = mv2222_sfp_link_down,
.attach = phy_sfp_attach,
.detach = phy_sfp_detach,
+ .connect_phy = phy_sfp_connect_phy,
+ .disconnect_phy = phy_sfp_disconnect_phy,
};
static int mv2222_probe(struct phy_device *phydev)
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index b89fbffa6a93..9964bf3dea2f 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -3613,6 +3613,8 @@ static const struct sfp_upstream_ops m88e1510_sfp_ops = {
.module_remove = m88e1510_sfp_remove,
.attach = phy_sfp_attach,
.detach = phy_sfp_detach,
+ .connect_phy = phy_sfp_connect_phy,
+ .disconnect_phy = phy_sfp_disconnect_phy,
};
static int m88e1510_probe(struct phy_device *phydev)
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index ad43e280930c..6642eb642d4b 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -503,6 +503,8 @@ static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
static const struct sfp_upstream_ops mv3310_sfp_ops = {
.attach = phy_sfp_attach,
.detach = phy_sfp_detach,
+ .connect_phy = phy_sfp_connect_phy,
+ .disconnect_phy = phy_sfp_disconnect_phy,
.module_insert = mv3310_sfp_insert,
};
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index a35528497a57..a5ef8fe50704 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -12,6 +12,7 @@
#define PHY_ID_LAN87XX 0x0007c150
#define PHY_ID_LAN937X 0x0007c180
+#define PHY_ID_LAN887X 0x0007c1f0
/* External Register Control Register */
#define LAN87XX_EXT_REG_CTL (0x14)
@@ -94,8 +95,155 @@
/* SQI defines */
#define LAN87XX_MAX_SQI 0x07
+/* Chiptop registers */
+#define LAN887X_PMA_EXT_ABILITY_2 0x12
+#define LAN887X_PMA_EXT_ABILITY_2_1000T1 BIT(1)
+#define LAN887X_PMA_EXT_ABILITY_2_100T1 BIT(0)
+
+/* DSP 100M registers */
+#define LAN887x_CDR_CONFIG1_100 0x0405
+#define LAN887x_LOCK1_EQLSR_CONFIG_100 0x0411
+#define LAN887x_SLV_HD_MUFAC_CONFIG_100 0x0417
+#define LAN887x_PLOCK_MUFAC_CONFIG_100 0x041c
+#define LAN887x_PROT_DISABLE_100 0x0425
+#define LAN887x_KF_LOOP_SAT_CONFIG_100 0x0454
+
+/* DSP 1000M registers */
+#define LAN887X_LOCK1_EQLSR_CONFIG 0x0811
+#define LAN887X_LOCK3_EQLSR_CONFIG 0x0813
+#define LAN887X_PROT_DISABLE 0x0825
+#define LAN887X_FFE_GAIN6 0x0843
+#define LAN887X_FFE_GAIN7 0x0844
+#define LAN887X_FFE_GAIN8 0x0845
+#define LAN887X_FFE_GAIN9 0x0846
+#define LAN887X_ECHO_DELAY_CONFIG 0x08ec
+#define LAN887X_FFE_MAX_CONFIG 0x08ee
+
+/* PCS 1000M registers */
+#define LAN887X_SCR_CONFIG_3 0x8043
+#define LAN887X_INFO_FLD_CONFIG_5 0x8048
+
+/* T1 afe registers */
+#define LAN887X_ZQCAL_CONTROL_1 0x8080
+#define LAN887X_AFE_PORT_TESTBUS_CTRL2 0x8089
+#define LAN887X_AFE_PORT_TESTBUS_CTRL4 0x808b
+#define LAN887X_AFE_PORT_TESTBUS_CTRL6 0x808d
+#define LAN887X_TX_AMPLT_1000T1_REG 0x80b0
+#define LAN887X_INIT_COEFF_DFE1_100 0x0422
+
+/* PMA registers */
+#define LAN887X_DSP_PMA_CONTROL 0x810e
+#define LAN887X_DSP_PMA_CONTROL_LNK_SYNC BIT(4)
+
+/* PCS 100M registers */
+#define LAN887X_IDLE_ERR_TIMER_WIN 0x8204
+#define LAN887X_IDLE_ERR_CNT_THRESH 0x8213
+
+/* Misc registers */
+#define LAN887X_REG_REG26 0x001a
+#define LAN887X_REG_REG26_HW_INIT_SEQ_EN BIT(8)
+
+/* Mis registers */
+#define LAN887X_MIS_CFG_REG0 0xa00
+#define LAN887X_MIS_CFG_REG0_RCLKOUT_DIS BIT(5)
+#define LAN887X_MIS_CFG_REG0_MAC_MODE_SEL GENMASK(1, 0)
+
+#define LAN887X_MAC_MODE_RGMII 0x01
+#define LAN887X_MAC_MODE_SGMII 0x03
+
+#define LAN887X_MIS_DLL_CFG_REG0 0xa01
+#define LAN887X_MIS_DLL_CFG_REG1 0xa02
+
+#define LAN887X_MIS_DLL_DELAY_EN BIT(15)
+#define LAN887X_MIS_DLL_EN BIT(0)
+#define LAN887X_MIS_DLL_CONF (LAN887X_MIS_DLL_DELAY_EN |\
+ LAN887X_MIS_DLL_EN)
+
+#define LAN887X_MIS_CFG_REG2 0xa03
+#define LAN887X_MIS_CFG_REG2_FE_LPBK_EN BIT(2)
+
+#define LAN887X_MIS_PKT_STAT_REG0 0xa06
+#define LAN887X_MIS_PKT_STAT_REG1 0xa07
+#define LAN887X_MIS_PKT_STAT_REG3 0xa09
+#define LAN887X_MIS_PKT_STAT_REG4 0xa0a
+#define LAN887X_MIS_PKT_STAT_REG5 0xa0b
+#define LAN887X_MIS_PKT_STAT_REG6 0xa0c
+
+/* Chiptop common registers */
+#define LAN887X_COMMON_LED3_LED2 0xc05
+#define LAN887X_COMMON_LED2_MODE_SEL_MASK GENMASK(4, 0)
+#define LAN887X_LED_LINK_ACT_ANY_SPEED 0x0
+
+/* MX chip top registers */
+#define LAN887X_CHIP_HARD_RST 0xf03e
+#define LAN887X_CHIP_HARD_RST_RESET BIT(0)
+
+#define LAN887X_CHIP_SOFT_RST 0xf03f
+#define LAN887X_CHIP_SOFT_RST_RESET BIT(0)
+
+#define LAN887X_SGMII_CTL 0xf01a
+#define LAN887X_SGMII_CTL_SGMII_MUX_EN BIT(0)
+
+#define LAN887X_SGMII_PCS_CFG 0xf034
+#define LAN887X_SGMII_PCS_CFG_PCS_ENA BIT(9)
+
+#define LAN887X_EFUSE_READ_DAT9 0xf209
+#define LAN887X_EFUSE_READ_DAT9_SGMII_DIS BIT(9)
+#define LAN887X_EFUSE_READ_DAT9_MAC_MODE GENMASK(1, 0)
+
+#define LAN887X_CALIB_CONFIG_100 0x437
+#define LAN887X_CALIB_CONFIG_100_CBL_DIAG_USE_LOCAL_SMPL BIT(5)
+#define LAN887X_CALIB_CONFIG_100_CBL_DIAG_STB_SYNC_MODE BIT(4)
+#define LAN887X_CALIB_CONFIG_100_CBL_DIAG_CLK_ALGN_MODE BIT(3)
+#define LAN887X_CALIB_CONFIG_100_VAL \
+ (LAN887X_CALIB_CONFIG_100_CBL_DIAG_CLK_ALGN_MODE |\
+ LAN887X_CALIB_CONFIG_100_CBL_DIAG_STB_SYNC_MODE |\
+ LAN887X_CALIB_CONFIG_100_CBL_DIAG_USE_LOCAL_SMPL)
+
+#define LAN887X_MAX_PGA_GAIN_100 0x44f
+#define LAN887X_MIN_PGA_GAIN_100 0x450
+#define LAN887X_START_CBL_DIAG_100 0x45a
+#define LAN887X_CBL_DIAG_DONE BIT(1)
+#define LAN887X_CBL_DIAG_START BIT(0)
+#define LAN887X_CBL_DIAG_STOP 0x0
+
+#define LAN887X_CBL_DIAG_TDR_THRESH_100 0x45b
+#define LAN887X_CBL_DIAG_AGC_THRESH_100 0x45c
+#define LAN887X_CBL_DIAG_MIN_WAIT_CONFIG_100 0x45d
+#define LAN887X_CBL_DIAG_MAX_WAIT_CONFIG_100 0x45e
+#define LAN887X_CBL_DIAG_CYC_CONFIG_100 0x45f
+#define LAN887X_CBL_DIAG_TX_PULSE_CONFIG_100 0x460
+#define LAN887X_CBL_DIAG_MIN_PGA_GAIN_100 0x462
+#define LAN887X_CBL_DIAG_AGC_GAIN_100 0x497
+#define LAN887X_CBL_DIAG_POS_PEAK_VALUE_100 0x499
+#define LAN887X_CBL_DIAG_NEG_PEAK_VALUE_100 0x49a
+#define LAN887X_CBL_DIAG_POS_PEAK_TIME_100 0x49c
+#define LAN887X_CBL_DIAG_NEG_PEAK_TIME_100 0x49d
+
+#define MICROCHIP_CABLE_NOISE_MARGIN 20
+#define MICROCHIP_CABLE_TIME_MARGIN 89
+#define MICROCHIP_CABLE_MIN_TIME_DIFF 96
+#define MICROCHIP_CABLE_MAX_TIME_DIFF \
+ (MICROCHIP_CABLE_MIN_TIME_DIFF + MICROCHIP_CABLE_TIME_MARGIN)
+
#define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
-#define DRIVER_DESC "Microchip LAN87XX/LAN937x T1 PHY driver"
+#define DRIVER_DESC "Microchip LAN87XX/LAN937x/LAN887x T1 PHY driver"
+
+/* TEST_MODE_NORMAL: Non-hybrid results to calculate cable status (open/short/ok)
+ * TEST_MODE_HYBRID: Hybrid results to calculate distance to fault
+ */
+enum cable_diag_mode {
+ TEST_MODE_NORMAL,
+ TEST_MODE_HYBRID
+};
+
+/* CD_TEST_INIT: Cable test is initiated
+ * CD_TEST_DONE: Cable test is done
+ */
+enum cable_diag_state {
+ CD_TEST_INIT,
+ CD_TEST_DONE
+};
struct access_ereg_val {
u8 mode;
@@ -105,6 +253,32 @@ struct access_ereg_val {
u16 mask;
};
+struct lan887x_hw_stat {
+ const char *string;
+ u8 mmd;
+ u16 reg;
+ u8 bits;
+};
+
+static const struct lan887x_hw_stat lan887x_hw_stats[] = {
+ { "TX Good Count", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG0, 14},
+ { "RX Good Count", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG1, 14},
+ { "RX ERR Count detected by PCS", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG3, 16},
+ { "TX CRC ERR Count", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG4, 8},
+ { "RX CRC ERR Count", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG5, 8},
+ { "RX ERR Count for SGMII MII2GMII", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG6, 8},
+};
+
+struct lan887x_regwr_map {
+ u8 mmd;
+ u16 reg;
+ u16 val;
+};
+
+struct lan887x_priv {
+ u64 stats[ARRAY_SIZE(lan887x_hw_stats)];
+};
+
static int lan937x_dsp_workaround(struct phy_device *phydev, u16 ereg, u8 bank)
{
u8 prev_bank;
@@ -860,6 +1034,802 @@ static int lan87xx_get_sqi_max(struct phy_device *phydev)
return LAN87XX_MAX_SQI;
}
+static int lan887x_rgmii_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* SGMII mux disable */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_SGMII_CTL,
+ LAN887X_SGMII_CTL_SGMII_MUX_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Select MAC_MODE as RGMII */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_CFG_REG0,
+ LAN887X_MIS_CFG_REG0_MAC_MODE_SEL,
+ LAN887X_MAC_MODE_RGMII);
+ if (ret < 0)
+ return ret;
+
+ /* Disable PCS */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_SGMII_PCS_CFG,
+ LAN887X_SGMII_PCS_CFG_PCS_ENA);
+ if (ret < 0)
+ return ret;
+
+	/* LAN887x Errata: the RGMII RX clock is active in SGMII mode.
+	 * It is disabled for SGMII mode and re-enabled here for RGMII mode.
+	 */
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_MIS_CFG_REG0,
+ LAN887X_MIS_CFG_REG0_RCLKOUT_DIS);
+}
+
+static int lan887x_sgmii_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* SGMII mux enable */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_SGMII_CTL,
+ LAN887X_SGMII_CTL_SGMII_MUX_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Select MAC_MODE as SGMII */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_CFG_REG0,
+ LAN887X_MIS_CFG_REG0_MAC_MODE_SEL,
+ LAN887X_MAC_MODE_SGMII);
+ if (ret < 0)
+ return ret;
+
+	/* LAN887x Errata: the RGMII RX clock is active in SGMII mode,
+	 * so disable it here for SGMII mode.
+	 */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_CFG_REG0,
+ LAN887X_MIS_CFG_REG0_RCLKOUT_DIS);
+ if (ret < 0)
+ return ret;
+
+ /* Enable PCS */
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, LAN887X_SGMII_PCS_CFG,
+ LAN887X_SGMII_PCS_CFG_PCS_ENA);
+}
+
+static int lan887x_config_rgmii_en(struct phy_device *phydev)
+{
+ int txc;
+ int rxc;
+ int ret;
+
+ ret = lan887x_rgmii_init(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Control bit to enable/disable TX DLL delay line in signal path */
+ txc = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_DLL_CFG_REG0);
+ if (txc < 0)
+ return txc;
+
+ /* Control bit to enable/disable RX DLL delay line in signal path */
+ rxc = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_DLL_CFG_REG1);
+ if (rxc < 0)
+ return rxc;
+
+	/* Configure the PHY-side RX/TX delays per interface mode:
+	 * RGMII - delays are either added by the MAC or not needed;
+	 * the PHY must not add them
+	 * RGMII_ID - the PHY adds the TX and RX delays; the MAC must not
+	 * RGMII_RX_ID - the PHY adds the RX delay; the MAC must not add
+	 * the RX delay
+	 * RGMII_TX_ID - the PHY adds the TX delay; the MAC must not add
+	 * the TX delay in this case
+	 */
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ txc &= ~LAN887X_MIS_DLL_CONF;
+ rxc &= ~LAN887X_MIS_DLL_CONF;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ txc |= LAN887X_MIS_DLL_CONF;
+ rxc |= LAN887X_MIS_DLL_CONF;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ txc &= ~LAN887X_MIS_DLL_CONF;
+ rxc |= LAN887X_MIS_DLL_CONF;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ txc |= LAN887X_MIS_DLL_CONF;
+ rxc &= ~LAN887X_MIS_DLL_CONF;
+ break;
+ default:
+ WARN_ONCE(1, "Invalid phydev interface %d\n", phydev->interface);
+ return 0;
+ }
+
+ /* Configures the PHY to enable/disable RX delay in signal path */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_DLL_CFG_REG1,
+ LAN887X_MIS_DLL_CONF, rxc);
+ if (ret < 0)
+ return ret;
+
+ /* Configures the PHY to enable/disable the TX delay in signal path */
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_DLL_CFG_REG0,
+ LAN887X_MIS_DLL_CONF, txc);
+}
+
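
The switch above encodes the standard RGMII delay contract: for each internal-delay mode, whichever side adds a delay, the other must not. A runnable truth-table sketch of the PHY-side view:

#include <stdbool.h>
#include <stdio.h>

enum rgmii_mode { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

static void phy_delays(enum rgmii_mode m, bool *tx, bool *rx)
{
	*tx = (m == RGMII_ID || m == RGMII_TXID);	/* PHY adds TX delay */
	*rx = (m == RGMII_ID || m == RGMII_RXID);	/* PHY adds RX delay */
}

int main(void)
{
	static const char *name[] = { "rgmii", "rgmii-id", "rgmii-rxid",
				      "rgmii-txid" };

	for (enum rgmii_mode m = RGMII; m <= RGMII_TXID; m++) {
		bool tx, rx;

		phy_delays(m, &tx, &rx);
		printf("%-10s phy tx=%d rx=%d\n", name[m], tx, rx);
	}
	return 0;
}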
+static int lan887x_config_phy_interface(struct phy_device *phydev)
+{
+ int interface_mode;
+ int sgmii_dis;
+ int ret;
+
+ /* Read sku efuse data for interfaces supported by sku */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_EFUSE_READ_DAT9);
+ if (ret < 0)
+ return ret;
+
+	/* If interface_mode is 1, the efuse selects RGMII operation.
+	 * If interface_mode is 3, the efuse selects SGMII operation.
+	 */
+ interface_mode = ret & LAN887X_EFUSE_READ_DAT9_MAC_MODE;
+ /* SGMII disable is set for RGMII operations */
+ sgmii_dis = ret & LAN887X_EFUSE_READ_DAT9_SGMII_DIS;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ /* Reject RGMII settings for SGMII only sku */
+ ret = -EOPNOTSUPP;
+
+ if (!((interface_mode & LAN887X_MAC_MODE_SGMII) ==
+ LAN887X_MAC_MODE_SGMII))
+ ret = lan887x_config_rgmii_en(phydev);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ /* Reject SGMII setting for RGMII only sku */
+ ret = -EOPNOTSUPP;
+
+ if (!sgmii_dis)
+ ret = lan887x_sgmii_init(phydev);
+ break;
+ default:
+ /* Reject setting for unsupported interfaces */
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int lan887x_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Enable twisted pair */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
+
+ /* First patch only supports 100Mbps and 1000Mbps force-mode.
+ * T1 Auto-Negotiation (Clause 98 of IEEE 802.3) will be added later.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+
+ return 0;
+}
+
+static int lan887x_phy_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Clear loopback */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_MIS_CFG_REG2,
+ LAN887X_MIS_CFG_REG2_FE_LPBK_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Configure default behavior of led to link and activity for any
+ * speed
+ */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_COMMON_LED3_LED2,
+ LAN887X_COMMON_LED2_MODE_SEL_MASK,
+ LAN887X_LED_LINK_ACT_ANY_SPEED);
+ if (ret < 0)
+ return ret;
+
+ /* PHY interface setup */
+ return lan887x_config_phy_interface(phydev);
+}
+
+static int lan887x_phy_config(struct phy_device *phydev,
+ const struct lan887x_regwr_map *reg_map, int cnt)
+{
+ int ret;
+
+ for (int i = 0; i < cnt; i++) {
+ ret = phy_write_mmd(phydev, reg_map[i].mmd,
+ reg_map[i].reg, reg_map[i].val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lan887x_phy_setup(struct phy_device *phydev)
+{
+ static const struct lan887x_regwr_map phy_cfg[] = {
+ /* PORT_AFE writes */
+ {MDIO_MMD_PMAPMD, LAN887X_ZQCAL_CONTROL_1, 0x4008},
+ {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL2, 0x0000},
+ {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL6, 0x0040},
+ /* 100T1_PCS_VENDOR writes */
+ {MDIO_MMD_PCS, LAN887X_IDLE_ERR_CNT_THRESH, 0x0008},
+ {MDIO_MMD_PCS, LAN887X_IDLE_ERR_TIMER_WIN, 0x800d},
+ /* 100T1 DSP writes */
+ {MDIO_MMD_VEND1, LAN887x_CDR_CONFIG1_100, 0x0ab1},
+ {MDIO_MMD_VEND1, LAN887x_LOCK1_EQLSR_CONFIG_100, 0x5274},
+ {MDIO_MMD_VEND1, LAN887x_SLV_HD_MUFAC_CONFIG_100, 0x0d74},
+ {MDIO_MMD_VEND1, LAN887x_PLOCK_MUFAC_CONFIG_100, 0x0aea},
+ {MDIO_MMD_VEND1, LAN887x_PROT_DISABLE_100, 0x0360},
+ {MDIO_MMD_VEND1, LAN887x_KF_LOOP_SAT_CONFIG_100, 0x0c30},
+ /* 1000T1 DSP writes */
+ {MDIO_MMD_VEND1, LAN887X_LOCK1_EQLSR_CONFIG, 0x2a78},
+ {MDIO_MMD_VEND1, LAN887X_LOCK3_EQLSR_CONFIG, 0x1368},
+ {MDIO_MMD_VEND1, LAN887X_PROT_DISABLE, 0x1354},
+ {MDIO_MMD_VEND1, LAN887X_FFE_GAIN6, 0x3C84},
+ {MDIO_MMD_VEND1, LAN887X_FFE_GAIN7, 0x3ca5},
+ {MDIO_MMD_VEND1, LAN887X_FFE_GAIN8, 0x3ca5},
+ {MDIO_MMD_VEND1, LAN887X_FFE_GAIN9, 0x3ca5},
+ {MDIO_MMD_VEND1, LAN887X_ECHO_DELAY_CONFIG, 0x0024},
+ {MDIO_MMD_VEND1, LAN887X_FFE_MAX_CONFIG, 0x227f},
+ /* 1000T1 PCS writes */
+ {MDIO_MMD_PCS, LAN887X_SCR_CONFIG_3, 0x1e00},
+ {MDIO_MMD_PCS, LAN887X_INFO_FLD_CONFIG_5, 0x0fa1},
+ };
+
+ return lan887x_phy_config(phydev, phy_cfg, ARRAY_SIZE(phy_cfg));
+}
+
+static int lan887x_100M_setup(struct phy_device *phydev)
+{
+ int ret;
+
+ /* (Re)configure the speed/mode dependent T1 settings */
+ if (phydev->master_slave_set == MASTER_SLAVE_CFG_MASTER_FORCE ||
+	    phydev->master_slave_set == MASTER_SLAVE_CFG_MASTER_PREFERRED) {
+ static const struct lan887x_regwr_map phy_cfg[] = {
+ {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL4, 0x00b8},
+ {MDIO_MMD_PMAPMD, LAN887X_TX_AMPLT_1000T1_REG, 0x0038},
+ {MDIO_MMD_VEND1, LAN887X_INIT_COEFF_DFE1_100, 0x000f},
+ };
+
+ ret = lan887x_phy_config(phydev, phy_cfg, ARRAY_SIZE(phy_cfg));
+ } else {
+ static const struct lan887x_regwr_map phy_cfg[] = {
+ {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL4, 0x0038},
+ {MDIO_MMD_VEND1, LAN887X_INIT_COEFF_DFE1_100, 0x0014},
+ };
+
+ ret = lan887x_phy_config(phydev, phy_cfg, ARRAY_SIZE(phy_cfg));
+ }
+ if (ret < 0)
+ return ret;
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, LAN887X_REG_REG26,
+ LAN887X_REG_REG26_HW_INIT_SEQ_EN);
+}
+
+static int lan887x_1000M_setup(struct phy_device *phydev)
+{
+ static const struct lan887x_regwr_map phy_cfg[] = {
+ {MDIO_MMD_PMAPMD, LAN887X_TX_AMPLT_1000T1_REG, 0x003f},
+ {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL4, 0x00b8},
+ };
+ int ret;
+
+ /* (Re)configure the speed/mode dependent T1 settings */
+ ret = lan887x_phy_config(phydev, phy_cfg, ARRAY_SIZE(phy_cfg));
+ if (ret < 0)
+ return ret;
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, LAN887X_DSP_PMA_CONTROL,
+ LAN887X_DSP_PMA_CONTROL_LNK_SYNC);
+}
+
+static int lan887x_link_setup(struct phy_device *phydev)
+{
+ int ret = -EINVAL;
+
+ if (phydev->speed == SPEED_1000)
+ ret = lan887x_1000M_setup(phydev);
+ else if (phydev->speed == SPEED_100)
+ ret = lan887x_100M_setup(phydev);
+
+ return ret;
+}
+
+/* LAN887x Errata: speed configuration changes require soft reset
+ * and chip soft reset
+ */
+static int lan887x_phy_reset(struct phy_device *phydev)
+{
+ int ret, val;
+
+ /* Clear 1000M link sync */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, LAN887X_DSP_PMA_CONTROL,
+ LAN887X_DSP_PMA_CONTROL_LNK_SYNC);
+ if (ret < 0)
+ return ret;
+
+ /* Clear 100M link sync */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, LAN887X_REG_REG26,
+ LAN887X_REG_REG26_HW_INIT_SEQ_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Chiptop soft-reset to allow the speed/mode change */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, LAN887X_CHIP_SOFT_RST,
+ LAN887X_CHIP_SOFT_RST_RESET);
+ if (ret < 0)
+ return ret;
+
+ /* CL22 soft-reset to let the link re-train */
+ ret = phy_modify(phydev, MII_BMCR, BMCR_RESET, BMCR_RESET);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for reset complete or timeout if > 10ms */
+ return phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
+ 5000, 10000, true);
+}
+
+static int lan887x_phy_reconfig(struct phy_device *phydev)
+{
+ int ret;
+
+ linkmode_zero(phydev->advertising);
+
+ ret = genphy_c45_pma_setup_forced(phydev);
+ if (ret < 0)
+ return ret;
+
+ return lan887x_link_setup(phydev);
+}
+
+static int lan887x_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ /* LAN887x Errata: speed configuration changes require soft reset
+ * and chip soft reset
+ */
+ ret = lan887x_phy_reset(phydev);
+ if (ret < 0)
+ return ret;
+
+ return lan887x_phy_reconfig(phydev);
+}
+
+static int lan887x_probe(struct phy_device *phydev)
+{
+ struct lan887x_priv *priv;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return lan887x_phy_setup(phydev);
+}
+
+static u64 lan887x_get_stat(struct phy_device *phydev, int i)
+{
+ struct lan887x_hw_stat stat = lan887x_hw_stats[i];
+ struct lan887x_priv *priv = phydev->priv;
+ int val;
+ u64 ret;
+
+ if (stat.mmd)
+ val = phy_read_mmd(phydev, stat.mmd, stat.reg);
+ else
+ val = phy_read(phydev, stat.reg);
+
+ if (val < 0) {
+ ret = U64_MAX;
+ } else {
+ val = val & ((1 << stat.bits) - 1);
+ priv->stats[i] += val;
+ ret = priv->stats[i];
+ }
+
+ return ret;
+}
+
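
lan887x_get_stat() masks each read to the counter width and accumulates into a u64, returning U64_MAX so ethtool can flag a failed read; the add-on-every-read pattern assumes clear-on-read hardware counters. A userspace sketch of the same accumulation (the stub reader is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define U64_MAX UINT64_MAX

/* Stub: N-bit clear-on-read hardware counter; negative = read error. */
static int read_hw_counter(void) { return 0x3fff; }

static uint64_t accumulate_stat(uint64_t *acc, unsigned int bits)
{
	int val = read_hw_counter();

	if (val < 0)
		return U64_MAX;		/* flag the error to ethtool */

	val &= (1u << bits) - 1;	/* keep only the valid field */
	*acc += val;
	return *acc;
}

int main(void)
{
	uint64_t acc = 0;

	printf("total = %llu\n",
	       (unsigned long long)accumulate_stat(&acc, 14));
	return 0;
}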
+static void lan887x_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ for (int i = 0; i < ARRAY_SIZE(lan887x_hw_stats); i++)
+ data[i] = lan887x_get_stat(phydev, i);
+}
+
+static int lan887x_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(lan887x_hw_stats);
+}
+
+static void lan887x_get_strings(struct phy_device *phydev, u8 *data)
+{
+ for (int i = 0; i < ARRAY_SIZE(lan887x_hw_stats); i++)
+ ethtool_puts(&data, lan887x_hw_stats[i].string);
+}
+
+static int lan887x_cd_reset(struct phy_device *phydev,
+ enum cable_diag_state cd_done)
+{
+ u16 val;
+ int rc;
+
+ /* Chip hard-reset */
+ rc = phy_write_mmd(phydev, MDIO_MMD_VEND1, LAN887X_CHIP_HARD_RST,
+ LAN887X_CHIP_HARD_RST_RESET);
+ if (rc < 0)
+ return rc;
+
+ /* Wait for reset to complete */
+ rc = phy_read_poll_timeout(phydev, MII_PHYSID2, val,
+ ((val & GENMASK(15, 4)) ==
+ (PHY_ID_LAN887X & GENMASK(15, 4))),
+ 5000, 50000, true);
+ if (rc < 0)
+ return rc;
+
+ if (cd_done == CD_TEST_DONE) {
+ /* Cable diagnostics complete. Restore PHY. */
+ rc = lan887x_phy_setup(phydev);
+ if (rc < 0)
+ return rc;
+
+ rc = lan887x_phy_init(phydev);
+ if (rc < 0)
+ return rc;
+
+ rc = lan887x_phy_reconfig(phydev);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int lan887x_cable_test_prep(struct phy_device *phydev,
+ enum cable_diag_mode mode)
+{
+ static const struct lan887x_regwr_map values[] = {
+ {MDIO_MMD_VEND1, LAN887X_MAX_PGA_GAIN_100, 0x1f},
+ {MDIO_MMD_VEND1, LAN887X_MIN_PGA_GAIN_100, 0x0},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_TDR_THRESH_100, 0x1},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_AGC_THRESH_100, 0x3c},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_MIN_WAIT_CONFIG_100, 0x0},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_MAX_WAIT_CONFIG_100, 0x46},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_CYC_CONFIG_100, 0x5a},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_TX_PULSE_CONFIG_100, 0x44d5},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_MIN_PGA_GAIN_100, 0x0},
+
+ };
+ int rc;
+
+ rc = lan887x_cd_reset(phydev, CD_TEST_INIT);
+ if (rc < 0)
+ return rc;
+
+ /* Forcing DUT to master mode, as we don't care about
+ * mode during diagnostics
+ */
+ rc = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL,
+ MDIO_PMA_PMD_BT1_CTRL_CFG_MST);
+ if (rc < 0)
+ return rc;
+
+ rc = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x80b0, 0x0038);
+ if (rc < 0)
+ return rc;
+
+ rc = phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CALIB_CONFIG_100, 0,
+ LAN887X_CALIB_CONFIG_100_VAL);
+ if (rc < 0)
+ return rc;
+
+ for (int i = 0; i < ARRAY_SIZE(values); i++) {
+ rc = phy_write_mmd(phydev, values[i].mmd, values[i].reg,
+ values[i].val);
+ if (rc < 0)
+ return rc;
+
+ if (mode &&
+ values[i].reg == LAN887X_CBL_DIAG_MAX_WAIT_CONFIG_100) {
+ rc = phy_write_mmd(phydev, values[i].mmd,
+ values[i].reg, 0xa);
+ if (rc < 0)
+ return rc;
+ }
+ }
+
+ if (mode == TEST_MODE_HYBRID) {
+ rc = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD,
+ LAN887X_AFE_PORT_TESTBUS_CTRL4,
+ BIT(0), BIT(0));
+ if (rc < 0)
+ return rc;
+ }
+
+ /* HW_INIT 100T1, Get DUT running in 100T1 mode */
+ rc = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_REG_REG26,
+ LAN887X_REG_REG26_HW_INIT_SEQ_EN,
+ LAN887X_REG_REG26_HW_INIT_SEQ_EN);
+ if (rc < 0)
+ return rc;
+
+	/* Cable diag requires a hard reset and is sensitive to delays.
+	 * A hard reset is expected going into and out of cable diag.
+	 * Wait for 50ms.
+	 */
+ msleep(50);
+
+ /* Start cable diag */
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100,
+ LAN887X_CBL_DIAG_START);
+}
+
+static int lan887x_cable_test_chk(struct phy_device *phydev,
+ enum cable_diag_mode mode)
+{
+ int val;
+ int rc;
+
+ if (mode == TEST_MODE_HYBRID) {
+		/* Cable diag requires a hard reset and is sensitive to delays.
+		 * A hard reset is expected going into and out of cable diag.
+		 * Wait for cable diag to complete.
+		 * The minimum wait time is 50ms if the condition does not match.
+		 */
+ rc = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100, val,
+ ((val & LAN887X_CBL_DIAG_DONE) ==
+ LAN887X_CBL_DIAG_DONE),
+ 50000, 500000, false);
+ if (rc < 0)
+ return rc;
+ } else {
+ rc = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100);
+ if (rc < 0)
+ return rc;
+
+ if ((rc & LAN887X_CBL_DIAG_DONE) != LAN887X_CBL_DIAG_DONE)
+ return -EAGAIN;
+ }
+
+ /* Stop cable diag */
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100,
+ LAN887X_CBL_DIAG_STOP);
+}
+
+static int lan887x_cable_test_start(struct phy_device *phydev)
+{
+ int rc, ret;
+
+ rc = lan887x_cable_test_prep(phydev, TEST_MODE_NORMAL);
+ if (rc < 0) {
+ ret = lan887x_cd_reset(phydev, CD_TEST_DONE);
+ if (ret < 0)
+ return ret;
+
+ return rc;
+ }
+
+ return 0;
+}
+
+static int lan887x_cable_test_report(struct phy_device *phydev)
+{
+ int pos_peak_cycle, pos_peak_cycle_hybrid, pos_peak_in_phases;
+ int pos_peak_time, pos_peak_time_hybrid, neg_peak_time;
+ int neg_peak_cycle, neg_peak_in_phases;
+ int pos_peak_in_phases_hybrid;
+ int gain_idx, gain_idx_hybrid;
+ int pos_peak_phase_hybrid;
+ int pos_peak, neg_peak;
+ int distance;
+ int detect;
+ int length;
+ int ret;
+ int rc;
+
+ /* Read non-hybrid results */
+ gain_idx = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_AGC_GAIN_100);
+ if (gain_idx < 0) {
+ rc = gain_idx;
+ goto error;
+ }
+
+ pos_peak = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_POS_PEAK_VALUE_100);
+ if (pos_peak < 0) {
+ rc = pos_peak;
+ goto error;
+ }
+
+ neg_peak = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_NEG_PEAK_VALUE_100);
+ if (neg_peak < 0) {
+ rc = neg_peak;
+ goto error;
+ }
+
+ pos_peak_time = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_POS_PEAK_TIME_100);
+ if (pos_peak_time < 0) {
+ rc = pos_peak_time;
+ goto error;
+ }
+
+ neg_peak_time = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_NEG_PEAK_TIME_100);
+ if (neg_peak_time < 0) {
+ rc = neg_peak_time;
+ goto error;
+ }
+
+ /* Calculate non-hybrid values */
+ pos_peak_cycle = (pos_peak_time >> 7) & 0x7f;
+ pos_peak_in_phases = (pos_peak_cycle * 96) + (pos_peak_time & 0x7f);
+ neg_peak_cycle = (neg_peak_time >> 7) & 0x7f;
+ neg_peak_in_phases = (neg_peak_cycle * 96) + (neg_peak_time & 0x7f);
+
+ /* Deriving the status of cable */
+ if (pos_peak > MICROCHIP_CABLE_NOISE_MARGIN &&
+ neg_peak > MICROCHIP_CABLE_NOISE_MARGIN && gain_idx >= 0) {
+ if (pos_peak_in_phases > neg_peak_in_phases &&
+ ((pos_peak_in_phases - neg_peak_in_phases) >=
+ MICROCHIP_CABLE_MIN_TIME_DIFF) &&
+ ((pos_peak_in_phases - neg_peak_in_phases) <
+ MICROCHIP_CABLE_MAX_TIME_DIFF) &&
+ pos_peak_in_phases > 0) {
+ detect = LAN87XX_CABLE_TEST_SAME_SHORT;
+ } else if (neg_peak_in_phases > pos_peak_in_phases &&
+ ((neg_peak_in_phases - pos_peak_in_phases) >=
+ MICROCHIP_CABLE_MIN_TIME_DIFF) &&
+ ((neg_peak_in_phases - pos_peak_in_phases) <
+ MICROCHIP_CABLE_MAX_TIME_DIFF) &&
+ neg_peak_in_phases > 0) {
+ detect = LAN87XX_CABLE_TEST_OPEN;
+ } else {
+ detect = LAN87XX_CABLE_TEST_OK;
+ }
+ } else {
+ detect = LAN87XX_CABLE_TEST_OK;
+ }
+
+ if (detect == LAN87XX_CABLE_TEST_OK) {
+ distance = 0;
+ goto get_len;
+ }
+
+ /* Re-initialize PHY and start cable diag test */
+ rc = lan887x_cable_test_prep(phydev, TEST_MODE_HYBRID);
+ if (rc < 0)
+ goto cd_stop;
+
+ /* Wait for cable diag test completion */
+ rc = lan887x_cable_test_chk(phydev, TEST_MODE_HYBRID);
+ if (rc < 0)
+ goto cd_stop;
+
+ /* Read hybrid results */
+ gain_idx_hybrid = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_AGC_GAIN_100);
+ if (gain_idx_hybrid < 0) {
+ rc = gain_idx_hybrid;
+ goto error;
+ }
+
+ pos_peak_time_hybrid = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_POS_PEAK_TIME_100);
+ if (pos_peak_time_hybrid < 0) {
+ rc = pos_peak_time_hybrid;
+ goto error;
+ }
+
+ /* Calculate hybrid values to derive cable length to fault */
+ pos_peak_cycle_hybrid = (pos_peak_time_hybrid >> 7) & 0x7f;
+ pos_peak_phase_hybrid = pos_peak_time_hybrid & 0x7f;
+ pos_peak_in_phases_hybrid = pos_peak_cycle_hybrid * 96 +
+ pos_peak_phase_hybrid;
+
+	/* Distance to fault calculation:
+	 * distance = (peak_in_phases - peak_in_phases_hybrid) *
+	 * propagation_constant
+	 * where the propagation constant converts a number of phases to
+	 * meters: propagation_constant = 0.015953
+	 * (0.6811 * 2.9979 * 156.2499 * 0.0001 * 0.5).
+	 * The code applies the constant as 1.5953, since ethtool further
+	 * divides by 100 to convert to meters.
+	 */
+ if (detect == LAN87XX_CABLE_TEST_OPEN) {
+ distance = (((pos_peak_in_phases - pos_peak_in_phases_hybrid)
+ * 15953) / 10000);
+ } else if (detect == LAN87XX_CABLE_TEST_SAME_SHORT) {
+ distance = (((neg_peak_in_phases - pos_peak_in_phases_hybrid)
+ * 15953) / 10000);
+ } else {
+ distance = 0;
+ }
+
+get_len:
+ rc = lan887x_cd_reset(phydev, CD_TEST_DONE);
+ if (rc < 0)
+ return rc;
+
+ length = ((u32)distance & GENMASK(15, 0));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ lan87xx_cable_test_report_trans(detect));
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A, length);
+
+ return 0;
+
+cd_stop:
+ /* Stop cable diag */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100,
+ LAN887X_CBL_DIAG_STOP);
+ if (ret < 0)
+ return ret;
+
+error:
+ /* Cable diag test failed */
+ ret = lan887x_cd_reset(phydev, CD_TEST_DONE);
+ if (ret < 0)
+ return ret;
+
+ /* Return error in failure case */
+ return rc;
+}
+
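
The distance arithmetic in lan887x_cable_test_report() is integer-only: multiply the phase delta by 15953 and divide by 10000, yielding centimeters, which ethtool then divides by 100. A worked example with a hypothetical phase delta:

#include <stdio.h>

/* Propagation constant: 0.015953 m per phase, scaled to avoid floats.
 * The driver reports centimeters; ethtool divides by 100 for meters.
 */
static int phases_to_cm(int phase_diff)
{
	return phase_diff * 15953 / 10000;
}

int main(void)
{
	int diff = 627;	/* hypothetical (peak - hybrid peak) phase delta */

	printf("%d phases -> %d cm -> %.2f m\n",
	       diff, phases_to_cm(diff), phases_to_cm(diff) / 100.0);
	return 0;
}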
+static int lan887x_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int rc;
+
+ rc = lan887x_cable_test_chk(phydev, TEST_MODE_NORMAL);
+ if (rc < 0) {
+		/* Let the PHY state machine poll again */
+ if (rc == -EAGAIN)
+ return 0;
+ return rc;
+ }
+
+ /* Cable diag test complete */
+ *finished = true;
+
+ /* Retrieve test status and cable length to fault */
+ return lan887x_cable_test_report(phydev);
+}
+
static struct phy_driver microchip_t1_phy_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX),
@@ -894,6 +1864,23 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.get_sqi_max = lan87xx_get_sqi_max,
.cable_test_start = lan87xx_cable_test_start,
.cable_test_get_status = lan87xx_cable_test_get_status,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN887X),
+ .name = "Microchip LAN887x T1 PHY",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = lan887x_probe,
+ .get_features = lan887x_get_features,
+ .config_init = lan887x_phy_init,
+ .config_aneg = lan887x_config_aneg,
+ .get_stats = lan887x_get_stats,
+ .get_sset_count = lan887x_get_sset_count,
+ .get_strings = lan887x_get_strings,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_status = genphy_c45_read_status,
+ .cable_test_start = lan887x_cable_test_start,
+ .cable_test_get_status = lan887x_cable_test_get_status,
}
};
@@ -902,6 +1889,7 @@ module_phy_driver(microchip_t1_phy_driver);
static struct mdio_device_id __maybe_unused microchip_t1_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX) },
{ PHY_ID_MATCH_MODEL(PHY_ID_LAN937X) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN887X) },
{ }
};
diff --git a/drivers/net/phy/microchip_t1s.c b/drivers/net/phy/microchip_t1s.c
index 534ca7d1b061..3614839a8e51 100644
--- a/drivers/net/phy/microchip_t1s.c
+++ b/drivers/net/phy/microchip_t1s.c
@@ -268,6 +268,34 @@ static int lan86xx_read_status(struct phy_device *phydev)
return 0;
}
+/* OPEN Alliance 10BASE-T1x compliant MAC-PHYs have both C22 and C45
+ * register spaces. If the PHY is discovered via the C22 bus protocol, the
+ * framework assumes it speaks C22 only and always uses indirect access
+ * through C22 registers to reach the C45 registers. This is because there
+ * is no clean separation between the C22/C45 register spaces and the
+ * C22/C45 MDIO bus protocols. As a result, direct C45 register access,
+ * which would save multiple SPI bus accesses, cannot be used.
+ * To support this feature, set .read_mmd/.write_mmd in the PHY driver to
+ * call .read_c45/.write_c45 in the OPEN Alliance framework
+ * drivers/net/ethernet/oa_tc6.c
+ */
+static int lan865x_phy_read_mmd(struct phy_device *phydev, int devnum,
+ u16 regnum)
+{
+ struct mii_bus *bus = phydev->mdio.bus;
+ int addr = phydev->mdio.addr;
+
+ return __mdiobus_c45_read(bus, addr, devnum, regnum);
+}
+
+static int lan865x_phy_write_mmd(struct phy_device *phydev, int devnum,
+ u16 regnum, u16 val)
+{
+ struct mii_bus *bus = phydev->mdio.bus;
+ int addr = phydev->mdio.addr;
+
+ return __mdiobus_c45_write(bus, addr, devnum, regnum, val);
+}
+
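
For contrast, the default path these overrides bypass is the Clause 22 indirect MMD access sequence through registers 13 and 14 (MII_MMD_CTRL/MII_MMD_DATA): four bus transactions per C45 read instead of one native C45 frame. A sketch of that sequence against a stubbed C22 bus:

#include <stdint.h>
#include <stdio.h>

#define MII_MMD_CTRL 13			/* MMD access control register */
#define MII_MMD_DATA 14			/* MMD address/data register */
#define MII_MMD_CTRL_NOINCR 0x4000	/* data access, no post-increment */

/* Stubbed C22 bus accessors. */
static void c22_write(int reg, uint16_t val) { printf("W r%d=0x%04x\n", reg, val); }
static uint16_t c22_read(int reg) { printf("R r%d\n", reg); return 0; }

/* Indirect C45 read via C22: 3 writes + 1 read per access. */
static uint16_t mmd_read_indirect(int devad, uint16_t regnum)
{
	c22_write(MII_MMD_CTRL, devad);			/* select device */
	c22_write(MII_MMD_DATA, regnum);		/* latch address */
	c22_write(MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR);	/* data mode */
	return c22_read(MII_MMD_DATA);			/* read the value */
}

int main(void)
{
	mmd_read_indirect(1, 0x12);	/* e.g. PMA device, hypothetical reg */
	return 0;
}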
static struct phy_driver microchip_t1s_driver[] = {
{
PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVB1),
@@ -285,6 +313,8 @@ static struct phy_driver microchip_t1s_driver[] = {
.features = PHY_BASIC_T1S_P2MP_FEATURES,
.config_init = lan865x_revb0_config_init,
.read_status = lan86xx_read_status,
+ .read_mmd = lan865x_phy_read_mmd,
+ .write_mmd = lan865x_phy_write_mmd,
.get_plca_cfg = genphy_c45_plca_get_cfg,
.set_plca_cfg = genphy_c45_plca_set_cfg,
.get_plca_status = genphy_c45_plca_get_status,
diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c
index 7a11fdb687cc..0e91f5d1a4fd 100644
--- a/drivers/net/phy/motorcomm.c
+++ b/drivers/net/phy/motorcomm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Motorcomm 8511/8521/8531/8531S PHY driver.
+ * Motorcomm 8511/8521/8531/8531S/8821 PHY driver.
*
* Author: Peter Geis <pgwipeout@gmail.com>
* Author: Frank <Frank.Sae@motor-comm.com>
@@ -16,8 +16,8 @@
#define PHY_ID_YT8521 0x0000011a
#define PHY_ID_YT8531 0x4f51e91b
#define PHY_ID_YT8531S 0x4f51e91a
-
-/* YT8521/YT8531S Register Overview
+#define PHY_ID_YT8821 0x4f51ea19
+/* YT8521/YT8531S/YT8821 Register Overview
* UTP Register space | FIBER Register space
* ------------------------------------------------------------
* | UTP MII | FIBER MII |
@@ -46,12 +46,12 @@
/* Specific Status Register */
#define YTPHY_SPECIFIC_STATUS_REG 0x11
-#define YTPHY_SSR_SPEED_MODE_OFFSET 14
-
-#define YTPHY_SSR_SPEED_MODE_MASK (BIT(15) | BIT(14))
-#define YTPHY_SSR_SPEED_10M 0x0
-#define YTPHY_SSR_SPEED_100M 0x1
-#define YTPHY_SSR_SPEED_1000M 0x2
+#define YTPHY_SSR_SPEED_MASK ((0x3 << 14) | BIT(9))
+#define YTPHY_SSR_SPEED_10M ((0x0 << 14))
+#define YTPHY_SSR_SPEED_100M ((0x1 << 14))
+#define YTPHY_SSR_SPEED_1000M ((0x2 << 14))
+#define YTPHY_SSR_SPEED_10G ((0x3 << 14))
+#define YTPHY_SSR_SPEED_2500M ((0x0 << 14) | BIT(9))
#define YTPHY_SSR_DUPLEX_OFFSET 13
#define YTPHY_SSR_DUPLEX BIT(13)
#define YTPHY_SSR_PAGE_RECEIVED BIT(12)
@@ -270,12 +270,89 @@
#define YT8531_SCR_CLK_SRC_REF_25M 4
#define YT8531_SCR_CLK_SRC_SSC_25M 5
+#define YT8821_SDS_EXT_CSR_CTRL_REG 0x23
+#define YT8821_SDS_EXT_CSR_VCO_LDO_EN BIT(15)
+#define YT8821_SDS_EXT_CSR_VCO_BIAS_LPF_EN BIT(8)
+
+#define YT8821_UTP_EXT_PI_CTRL_REG 0x56
+#define YT8821_UTP_EXT_PI_RST_N_FIFO BIT(5)
+#define YT8821_UTP_EXT_PI_TX_CLK_SEL_AFE BIT(4)
+#define YT8821_UTP_EXT_PI_RX_CLK_3_SEL_AFE BIT(3)
+#define YT8821_UTP_EXT_PI_RX_CLK_2_SEL_AFE BIT(2)
+#define YT8821_UTP_EXT_PI_RX_CLK_1_SEL_AFE BIT(1)
+#define YT8821_UTP_EXT_PI_RX_CLK_0_SEL_AFE BIT(0)
+
+#define YT8821_UTP_EXT_VCT_CFG6_CTRL_REG 0x97
+#define YT8821_UTP_EXT_FECHO_AMP_TH_HUGE GENMASK(15, 8)
+
+#define YT8821_UTP_EXT_ECHO_CTRL_REG 0x336
+#define YT8821_UTP_EXT_TRACE_LNG_GAIN_THR_1000 GENMASK(14, 8)
+
+#define YT8821_UTP_EXT_GAIN_CTRL_REG 0x340
+#define YT8821_UTP_EXT_TRACE_MED_GAIN_THR_1000 GENMASK(6, 0)
+
+#define YT8821_UTP_EXT_RPDN_CTRL_REG 0x34E
+#define YT8821_UTP_EXT_RPDN_BP_FFE_LNG_2500 BIT(15)
+#define YT8821_UTP_EXT_RPDN_BP_FFE_SHT_2500 BIT(7)
+#define YT8821_UTP_EXT_RPDN_IPR_SHT_2500 GENMASK(6, 0)
+
+#define YT8821_UTP_EXT_TH_20DB_2500_CTRL_REG 0x36A
+#define YT8821_UTP_EXT_TH_20DB_2500 GENMASK(15, 0)
+
+#define YT8821_UTP_EXT_TRACE_CTRL_REG 0x372
+#define YT8821_UTP_EXT_TRACE_LNG_GAIN_THE_2500 GENMASK(14, 8)
+#define YT8821_UTP_EXT_TRACE_MED_GAIN_THE_2500 GENMASK(6, 0)
+
+#define YT8821_UTP_EXT_ALPHA_IPR_CTRL_REG 0x374
+#define YT8821_UTP_EXT_ALPHA_SHT_2500 GENMASK(14, 8)
+#define YT8821_UTP_EXT_IPR_LNG_2500 GENMASK(6, 0)
+
+#define YT8821_UTP_EXT_PLL_CTRL_REG 0x450
+#define YT8821_UTP_EXT_PLL_SPARE_CFG GENMASK(7, 0)
+
+#define YT8821_UTP_EXT_DAC_IMID_CH_2_3_CTRL_REG 0x466
+#define YT8821_UTP_EXT_DAC_IMID_CH_3_10_ORG GENMASK(14, 8)
+#define YT8821_UTP_EXT_DAC_IMID_CH_2_10_ORG GENMASK(6, 0)
+
+#define YT8821_UTP_EXT_DAC_IMID_CH_0_1_CTRL_REG 0x467
+#define YT8821_UTP_EXT_DAC_IMID_CH_1_10_ORG GENMASK(14, 8)
+#define YT8821_UTP_EXT_DAC_IMID_CH_0_10_ORG GENMASK(6, 0)
+
+#define YT8821_UTP_EXT_DAC_IMSB_CH_2_3_CTRL_REG 0x468
+#define YT8821_UTP_EXT_DAC_IMSB_CH_3_10_ORG GENMASK(14, 8)
+#define YT8821_UTP_EXT_DAC_IMSB_CH_2_10_ORG GENMASK(6, 0)
+
+#define YT8821_UTP_EXT_DAC_IMSB_CH_0_1_CTRL_REG 0x469
+#define YT8821_UTP_EXT_DAC_IMSB_CH_1_10_ORG GENMASK(14, 8)
+#define YT8821_UTP_EXT_DAC_IMSB_CH_0_10_ORG GENMASK(6, 0)
+
+#define YT8821_UTP_EXT_MU_COARSE_FR_CTRL_REG 0x4B3
+#define YT8821_UTP_EXT_MU_COARSE_FR_F_FFE GENMASK(14, 12)
+#define YT8821_UTP_EXT_MU_COARSE_FR_F_FBE GENMASK(10, 8)
+
+#define YT8821_UTP_EXT_MU_FINE_FR_CTRL_REG 0x4B5
+#define YT8821_UTP_EXT_MU_FINE_FR_F_FFE GENMASK(14, 12)
+#define YT8821_UTP_EXT_MU_FINE_FR_F_FBE GENMASK(10, 8)
+
+#define YT8821_UTP_EXT_VGA_LPF1_CAP_CTRL_REG 0x4D2
+#define YT8821_UTP_EXT_VGA_LPF1_CAP_OTHER GENMASK(7, 4)
+#define YT8821_UTP_EXT_VGA_LPF1_CAP_2500 GENMASK(3, 0)
+
+#define YT8821_UTP_EXT_VGA_LPF2_CAP_CTRL_REG 0x4D3
+#define YT8821_UTP_EXT_VGA_LPF2_CAP_OTHER GENMASK(7, 4)
+#define YT8821_UTP_EXT_VGA_LPF2_CAP_2500 GENMASK(3, 0)
+
+#define YT8821_UTP_EXT_TXGE_NFR_FR_THP_CTRL_REG 0x660
+#define YT8821_UTP_EXT_NFR_TX_ABILITY BIT(3)
/* Extended Register end */
#define YTPHY_DTS_OUTPUT_CLK_DIS 0
#define YTPHY_DTS_OUTPUT_CLK_25M 25000000
#define YTPHY_DTS_OUTPUT_CLK_125M 125000000
+#define YT8821_CHIP_MODE_AUTO_BX2500_SGMII 0
+#define YT8821_CHIP_MODE_FORCE_BX2500 1
+
struct yt8521_priv {
/* combo_advertising is used for case of YT8521 in combo mode,
* this means that yt8521 may work in utp or fiber mode which depends
@@ -1187,8 +1264,7 @@ static int yt8521_adjust_status(struct phy_device *phydev, int status,
else
		duplex = DUPLEX_FULL; /* for fiber, it is always DUPLEX_FULL */
- speed_mode = (status & YTPHY_SSR_SPEED_MODE_MASK) >>
- YTPHY_SSR_SPEED_MODE_OFFSET;
+ speed_mode = status & YTPHY_SSR_SPEED_MASK;
switch (speed_mode) {
case YTPHY_SSR_SPEED_10M:
@@ -2252,6 +2328,572 @@ static int yt8521_get_features(struct phy_device *phydev)
return ret;
}
+/**
+ * yt8821_get_features - read mmd register to get 2.5G capability
+ * @phydev: target phy_device struct
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_pma_read_ext_abilities(phydev);
+ if (ret < 0)
+ return ret;
+
+ return genphy_read_abilities(phydev);
+}
+
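genphy_c45_pma_read_ext_abilities() reads the Clause 45 PMA extended-ability registers (which carry the 2.5G/5GBASE-T bits), and genphy_read_abilities() then fills in the Clause 22 base modes. A minimal sketch (assumed consumer, not part of this patch) of checking the combined result:

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
			      phydev->supported))
		phydev_info(phydev, "2.5GBASE-T supported\n");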
+/**
+ * yt8821_get_rate_matching - determine the rate matching type from the chip mode
+ * @phydev: target phy_device struct
+ * @iface: PHY data interface type
+ *
+ * Returns: rate matching type or negative errno code
+ */
+static int yt8821_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
+{
+ int val;
+
+ val = ytphy_read_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG);
+ if (val < 0)
+ return val;
+
+ if (FIELD_GET(YT8521_CCR_MODE_SEL_MASK, val) ==
+ YT8821_CHIP_MODE_FORCE_BX2500)
+ return RATE_MATCH_PAUSE;
+
+ return RATE_MATCH_NONE;
+}
+
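In the forced 2500BASE-X chip mode the serdes always runs at 2.5 Gbps, so slower UTP link speeds have to be absorbed with pause frames; in the auto mode the serdes follows the line speed and no rate matching is needed. A MAC driver can query this with phy_get_rate_matching(); a minimal usage sketch (enable_mac_pause() is a hypothetical MAC-side helper):

	if (phy_get_rate_matching(phydev, PHY_INTERFACE_MODE_2500BASEX) ==
	    RATE_MATCH_PAUSE)
		enable_mac_pause(priv);	/* let the PHY throttle the MAC */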
+/**
+ * yt8821_aneg_done() - determines the auto negotiation result
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Returns: 0 (no link) or 1 (UTP link) or negative errno code
+ */
+static int yt8821_aneg_done(struct phy_device *phydev)
+{
+ return yt8521_aneg_done_paged(phydev, YT8521_RSSR_UTP_SPACE);
+}
+
+/**
+ * yt8821_serdes_init() - serdes init
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_serdes_init(struct phy_device *phydev)
+{
+ int old_page;
+ int ret = 0;
+ u16 mask;
+ u16 set;
+
+ old_page = phy_select_page(phydev, YT8521_RSSR_FIBER_SPACE);
+ if (old_page < 0) {
+ phydev_err(phydev, "Failed to select page: %d\n",
+ old_page);
+ goto err_restore_page;
+ }
+
+ ret = __phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_SDS_EXT_CSR_VCO_LDO_EN |
+ YT8821_SDS_EXT_CSR_VCO_BIAS_LPF_EN;
+ set = YT8821_SDS_EXT_CSR_VCO_LDO_EN;
+ ret = ytphy_modify_ext(phydev, YT8821_SDS_EXT_CSR_CTRL_REG, mask,
+ set);
+
+err_restore_page:
+ return phy_restore_page(phydev, old_page, ret);
+}
+
+/**
+ * yt8821_utp_init() - utp init
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_utp_init(struct phy_device *phydev)
+{
+ int old_page;
+ int ret = 0;
+ u16 mask;
+ u16 save;
+ u16 set;
+
+ old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE);
+ if (old_page < 0) {
+ phydev_err(phydev, "Failed to select page: %d\n",
+ old_page);
+ goto err_restore_page;
+ }
+
+ mask = YT8821_UTP_EXT_RPDN_BP_FFE_LNG_2500 |
+ YT8821_UTP_EXT_RPDN_BP_FFE_SHT_2500 |
+ YT8821_UTP_EXT_RPDN_IPR_SHT_2500;
+ set = YT8821_UTP_EXT_RPDN_BP_FFE_LNG_2500 |
+ YT8821_UTP_EXT_RPDN_BP_FFE_SHT_2500;
+ ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_RPDN_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_VGA_LPF1_CAP_OTHER |
+ YT8821_UTP_EXT_VGA_LPF1_CAP_2500;
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_VGA_LPF1_CAP_CTRL_REG,
+ mask, 0);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_VGA_LPF2_CAP_OTHER |
+ YT8821_UTP_EXT_VGA_LPF2_CAP_2500;
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_VGA_LPF2_CAP_CTRL_REG,
+ mask, 0);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_TRACE_LNG_GAIN_THE_2500 |
+ YT8821_UTP_EXT_TRACE_MED_GAIN_THE_2500;
+ set = FIELD_PREP(YT8821_UTP_EXT_TRACE_LNG_GAIN_THE_2500, 0x5a) |
+ FIELD_PREP(YT8821_UTP_EXT_TRACE_MED_GAIN_THE_2500, 0x3c);
+ ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_TRACE_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_IPR_LNG_2500;
+ set = FIELD_PREP(YT8821_UTP_EXT_IPR_LNG_2500, 0x6c);
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_ALPHA_IPR_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_TRACE_LNG_GAIN_THR_1000;
+ set = FIELD_PREP(YT8821_UTP_EXT_TRACE_LNG_GAIN_THR_1000, 0x2a);
+ ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_ECHO_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_TRACE_MED_GAIN_THR_1000;
+ set = FIELD_PREP(YT8821_UTP_EXT_TRACE_MED_GAIN_THR_1000, 0x22);
+ ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_GAIN_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_TH_20DB_2500;
+ set = FIELD_PREP(YT8821_UTP_EXT_TH_20DB_2500, 0x8000);
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_TH_20DB_2500_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_MU_COARSE_FR_F_FFE |
+ YT8821_UTP_EXT_MU_COARSE_FR_F_FBE;
+ set = FIELD_PREP(YT8821_UTP_EXT_MU_COARSE_FR_F_FFE, 0x7) |
+ FIELD_PREP(YT8821_UTP_EXT_MU_COARSE_FR_F_FBE, 0x7);
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_MU_COARSE_FR_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_MU_FINE_FR_F_FFE |
+ YT8821_UTP_EXT_MU_FINE_FR_F_FBE;
+ set = FIELD_PREP(YT8821_UTP_EXT_MU_FINE_FR_F_FFE, 0x2) |
+ FIELD_PREP(YT8821_UTP_EXT_MU_FINE_FR_F_FBE, 0x2);
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_MU_FINE_FR_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ /* save YT8821_UTP_EXT_PI_CTRL_REG's val for use later */
+ ret = ytphy_read_ext(phydev, YT8821_UTP_EXT_PI_CTRL_REG);
+ if (ret < 0)
+ goto err_restore_page;
+
+ save = ret;
+
+ mask = YT8821_UTP_EXT_PI_TX_CLK_SEL_AFE |
+ YT8821_UTP_EXT_PI_RX_CLK_3_SEL_AFE |
+ YT8821_UTP_EXT_PI_RX_CLK_2_SEL_AFE |
+ YT8821_UTP_EXT_PI_RX_CLK_1_SEL_AFE |
+ YT8821_UTP_EXT_PI_RX_CLK_0_SEL_AFE;
+ ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_PI_CTRL_REG,
+ mask, 0);
+ if (ret < 0)
+ goto err_restore_page;
+
+ /* restore YT8821_UTP_EXT_PI_CTRL_REG's val */
+ ret = ytphy_write_ext(phydev, YT8821_UTP_EXT_PI_CTRL_REG, save);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_FECHO_AMP_TH_HUGE;
+ set = FIELD_PREP(YT8821_UTP_EXT_FECHO_AMP_TH_HUGE, 0x38);
+ ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_VCT_CFG6_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_NFR_TX_ABILITY;
+ set = YT8821_UTP_EXT_NFR_TX_ABILITY;
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_TXGE_NFR_FR_THP_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_PLL_SPARE_CFG;
+ set = FIELD_PREP(YT8821_UTP_EXT_PLL_SPARE_CFG, 0xe9);
+ ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_PLL_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_DAC_IMID_CH_3_10_ORG |
+ YT8821_UTP_EXT_DAC_IMID_CH_2_10_ORG;
+ set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_3_10_ORG, 0x64) |
+ FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_2_10_ORG, 0x64);
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_DAC_IMID_CH_2_3_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_DAC_IMID_CH_1_10_ORG |
+ YT8821_UTP_EXT_DAC_IMID_CH_0_10_ORG;
+ set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_1_10_ORG, 0x64) |
+ FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_0_10_ORG, 0x64);
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_DAC_IMID_CH_0_1_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_DAC_IMSB_CH_3_10_ORG |
+ YT8821_UTP_EXT_DAC_IMSB_CH_2_10_ORG;
+ set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_3_10_ORG, 0x64) |
+ FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_2_10_ORG, 0x64);
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_DAC_IMSB_CH_2_3_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_DAC_IMSB_CH_1_10_ORG |
+ YT8821_UTP_EXT_DAC_IMSB_CH_0_10_ORG;
+ set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_1_10_ORG, 0x64) |
+ FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_0_10_ORG, 0x64);
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_DAC_IMSB_CH_0_1_CTRL_REG,
+ mask, set);
+
+err_restore_page:
+ return phy_restore_page(phydev, old_page, ret);
+}
+
+/**
+ * yt8821_auto_sleep_config() - phy auto sleep config
+ * @phydev: a pointer to a &struct phy_device
+ * @enable: true to enable auto sleep, false to disable it
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_auto_sleep_config(struct phy_device *phydev,
+ bool enable)
+{
+ int old_page;
+ int ret = 0;
+
+ old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE);
+ if (old_page < 0) {
+ phydev_err(phydev, "Failed to select page: %d\n",
+ old_page);
+ goto err_restore_page;
+ }
+
+ ret = ytphy_modify_ext(phydev,
+ YT8521_EXTREG_SLEEP_CONTROL1_REG,
+ YT8521_ESC1R_SLEEP_SW,
+			       enable ? YT8521_ESC1R_SLEEP_SW : 0);
+
+err_restore_page:
+ return phy_restore_page(phydev, old_page, ret);
+}
+
+/**
+ * yt8821_soft_reset() - soft reset utp and serdes
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_soft_reset(struct phy_device *phydev)
+{
+ return ytphy_modify_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG,
+ YT8521_CCR_SW_RST, 0);
+}
+
+/**
+ * yt8821_config_init() - phy initialization
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_config_init(struct phy_device *phydev)
+{
+ u8 mode = YT8821_CHIP_MODE_AUTO_BX2500_SGMII;
+ int ret;
+ u16 set;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_2500BASEX)
+ mode = YT8821_CHIP_MODE_FORCE_BX2500;
+
+ set = FIELD_PREP(YT8521_CCR_MODE_SEL_MASK, mode);
+ ret = ytphy_modify_ext_with_lock(phydev,
+ YT8521_CHIP_CONFIG_REG,
+ YT8521_CCR_MODE_SEL_MASK,
+ set);
+ if (ret < 0)
+ return ret;
+
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ phydev->possible_interfaces);
+
+ if (mode == YT8821_CHIP_MODE_AUTO_BX2500_SGMII) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ phydev->possible_interfaces);
+
+ phydev->rate_matching = RATE_MATCH_NONE;
+ } else if (mode == YT8821_CHIP_MODE_FORCE_BX2500) {
+ phydev->rate_matching = RATE_MATCH_PAUSE;
+ }
+
+ ret = yt8821_serdes_init(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = yt8821_utp_init(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* disable auto sleep */
+ ret = yt8821_auto_sleep_config(phydev, false);
+ if (ret < 0)
+ return ret;
+
+ /* soft reset */
+ return yt8821_soft_reset(phydev);
+}
+
+/**
+ * yt8821_adjust_status() - update speed and duplex to phydev
+ * @phydev: a pointer to a &struct phy_device
+ * @val: value read from YTPHY_SPECIFIC_STATUS_REG
+ */
+static void yt8821_adjust_status(struct phy_device *phydev, int val)
+{
+ int speed, duplex;
+ int speed_mode;
+
+ duplex = FIELD_GET(YTPHY_SSR_DUPLEX, val);
+ speed_mode = val & YTPHY_SSR_SPEED_MASK;
+ switch (speed_mode) {
+ case YTPHY_SSR_SPEED_10M:
+ speed = SPEED_10;
+ break;
+ case YTPHY_SSR_SPEED_100M:
+ speed = SPEED_100;
+ break;
+ case YTPHY_SSR_SPEED_1000M:
+ speed = SPEED_1000;
+ break;
+ case YTPHY_SSR_SPEED_2500M:
+ speed = SPEED_2500;
+ break;
+ default:
+ speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ phydev->speed = speed;
+ phydev->duplex = duplex;
+}
+
+/**
+ * yt8821_update_interface() - update interface per current speed
+ * @phydev: a pointer to a &struct phy_device
+ */
+static void yt8821_update_interface(struct phy_device *phydev)
+{
+ if (!phydev->link)
+ return;
+
+ switch (phydev->speed) {
+ case SPEED_2500:
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+ case SPEED_1000:
+ case SPEED_100:
+ case SPEED_10:
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+ default:
+		phydev_warn(phydev, "phy speed err: %d\n", phydev->speed);
+ break;
+ }
+}
+
+/**
+ * yt8821_read_status() - determines the negotiated speed and duplex
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_read_status(struct phy_device *phydev)
+{
+ int link;
+ int ret;
+ int val;
+
+ ret = ytphy_write_ext_with_lock(phydev,
+ YT8521_REG_SPACE_SELECT_REG,
+ YT8521_RSSR_UTP_SPACE);
+ if (ret < 0)
+ return ret;
+
+ ret = genphy_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->autoneg_complete) {
+ ret = genphy_c45_read_lpa(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = phy_read(phydev, YTPHY_SPECIFIC_STATUS_REG);
+ if (ret < 0)
+ return ret;
+
+ val = ret;
+
+ link = val & YTPHY_SSR_LINK;
+ if (link)
+ yt8821_adjust_status(phydev, val);
+
+ if (link) {
+ if (phydev->link == 0)
+ phydev_dbg(phydev,
+ "%s, phy addr: %d, link up\n",
+ __func__, phydev->mdio.addr);
+ phydev->link = 1;
+ } else {
+ if (phydev->link == 1)
+ phydev_dbg(phydev,
+ "%s, phy addr: %d, link down\n",
+ __func__, phydev->mdio.addr);
+ phydev->link = 0;
+ }
+
+ val = ytphy_read_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG);
+ if (val < 0)
+ return val;
+
+ if (FIELD_GET(YT8521_CCR_MODE_SEL_MASK, val) ==
+ YT8821_CHIP_MODE_AUTO_BX2500_SGMII)
+ yt8821_update_interface(phydev);
+
+ return 0;
+}
+
+/**
+ * yt8821_modify_utp_fiber_bmcr - modify bits of a PHY's BMCR register
+ * @phydev: the phy_device struct
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * NOTE: Convenience function which allows a PHY's BMCR register to be
+ * modified as new register value = (old register value & ~mask) | set.
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_modify_utp_fiber_bmcr(struct phy_device *phydev,
+ u16 mask, u16 set)
+{
+ int ret;
+
+ ret = yt8521_modify_bmcr_paged(phydev, YT8521_RSSR_UTP_SPACE,
+ mask, set);
+ if (ret < 0)
+ return ret;
+
+ return yt8521_modify_bmcr_paged(phydev, YT8521_RSSR_FIBER_SPACE,
+ mask, set);
+}
+
+/**
+ * yt8821_suspend() - suspend the hardware
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_suspend(struct phy_device *phydev)
+{
+ int wol_config;
+
+ wol_config = ytphy_read_ext_with_lock(phydev,
+ YTPHY_WOL_CONFIG_REG);
+ if (wol_config < 0)
+ return wol_config;
+
+	/* if WOL is enabled, do nothing */
+ if (wol_config & YTPHY_WCR_ENABLE)
+ return 0;
+
+ return yt8821_modify_utp_fiber_bmcr(phydev, 0, BMCR_PDOWN);
+}
+
+/**
+ * yt8821_resume() - resume the hardware
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_resume(struct phy_device *phydev)
+{
+ int wol_config;
+ int ret;
+
+ /* disable auto sleep */
+ ret = yt8821_auto_sleep_config(phydev, false);
+ if (ret < 0)
+ return ret;
+
+ wol_config = ytphy_read_ext_with_lock(phydev,
+ YTPHY_WOL_CONFIG_REG);
+ if (wol_config < 0)
+ return wol_config;
+
+	/* if WOL is enabled, do nothing */
+ if (wol_config & YTPHY_WCR_ENABLE)
+ return 0;
+
+ return yt8821_modify_utp_fiber_bmcr(phydev, BMCR_PDOWN, 0);
+}
+
static struct phy_driver motorcomm_phy_drvs[] = {
{
PHY_ID_MATCH_EXACT(PHY_ID_YT8511),
@@ -2307,11 +2949,28 @@ static struct phy_driver motorcomm_phy_drvs[] = {
.suspend = yt8521_suspend,
.resume = yt8521_resume,
},
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_YT8821),
+ .name = "YT8821 2.5Gbps PHY",
+ .get_features = yt8821_get_features,
+ .read_page = yt8521_read_page,
+ .write_page = yt8521_write_page,
+ .get_wol = ytphy_get_wol,
+ .set_wol = ytphy_set_wol,
+ .config_aneg = genphy_config_aneg,
+ .aneg_done = yt8821_aneg_done,
+ .config_init = yt8821_config_init,
+ .get_rate_matching = yt8821_get_rate_matching,
+ .read_status = yt8821_read_status,
+ .soft_reset = yt8821_soft_reset,
+ .suspend = yt8821_suspend,
+ .resume = yt8821_resume,
+ },
};
module_phy_driver(motorcomm_phy_drvs);
-MODULE_DESCRIPTION("Motorcomm 8511/8521/8531/8531S PHY driver");
+MODULE_DESCRIPTION("Motorcomm 8511/8521/8531/8531S/8821 PHY driver");
MODULE_AUTHOR("Peter Geis");
MODULE_AUTHOR("Frank");
MODULE_LICENSE("GPL");
@@ -2321,6 +2980,7 @@ static const struct mdio_device_id __maybe_unused motorcomm_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8521) },
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8531) },
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8531S) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_YT8821) },
{ /* sentinel */ }
};
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index c1ddae36a2ae..738a8822fcf0 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -15,7 +15,7 @@
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/udp.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "mscc.h"
#include "mscc_ptp.h"
diff --git a/drivers/net/phy/open_alliance_helpers.c b/drivers/net/phy/open_alliance_helpers.c
new file mode 100644
index 000000000000..36a70451d7da
--- /dev/null
+++ b/drivers/net/phy/open_alliance_helpers.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * open_alliance_helpers.c - OPEN Alliance specific PHY diagnostic helpers
+ *
+ * This file contains helper functions for implementing advanced diagnostic
+ * features as specified by the OPEN Alliance for automotive Ethernet PHYs.
+ * These helpers include functionality for Time Delay Reflection (TDR), dynamic
+ * channel quality assessment, and other PHY diagnostics.
+ *
+ * For more information on the specifications, refer to the OPEN Alliance
+ * documentation: https://opensig.org/automotive-ethernet-specifications/
+ * Currently following specifications are partially or fully implemented:
+ * - Advanced diagnostic features for 1000BASE-T1 automotive Ethernet PHYs.
+ * TC12 - advanced PHY features.
+ * https://opensig.org/wp-content/uploads/2024/03/Advanced_PHY_features_for_automotive_Ethernet_v2.0_fin.pdf
+ */
+
+#include <linux/bitfield.h>
+#include <linux/ethtool_netlink.h>
+
+#include "open_alliance_helpers.h"
+
+/**
+ * oa_1000bt1_get_ethtool_cable_result_code - Convert TDR status to ethtool
+ * result code
+ * @reg_value: Value read from the TDR register
+ *
+ * This function takes a register value from the HDD.TDR register and converts
+ * the TDR status to the corresponding ethtool cable test result code.
+ *
+ * Return: The appropriate ethtool result code based on the TDR status
+ */
+int oa_1000bt1_get_ethtool_cable_result_code(u16 reg_value)
+{
+ u8 tdr_status = FIELD_GET(OA_1000BT1_HDD_TDR_STATUS_MASK, reg_value);
+ u8 dist_val = FIELD_GET(OA_1000BT1_HDD_TDR_DISTANCE_MASK, reg_value);
+
+ switch (tdr_status) {
+ case OA_1000BT1_HDD_TDR_STATUS_CABLE_OK:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case OA_1000BT1_HDD_TDR_STATUS_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case OA_1000BT1_HDD_TDR_STATUS_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case OA_1000BT1_HDD_TDR_STATUS_NOISE:
+ return ETHTOOL_A_CABLE_RESULT_CODE_NOISE;
+ default:
+ if (dist_val == OA_1000BT1_HDD_TDR_DISTANCE_RESOLUTION_NOT_POSSIBLE)
+ return ETHTOOL_A_CABLE_RESULT_CODE_RESOLUTION_NOT_POSSIBLE;
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+EXPORT_SYMBOL_GPL(oa_1000bt1_get_ethtool_cable_result_code);
+
+/**
+ * oa_1000bt1_get_tdr_distance - Get distance to the main fault from TDR
+ * register value
+ * @reg_value: Value read from the TDR register
+ *
+ * This function takes a register value from the HDD.TDR register and extracts
+ * the distance to the main fault detected by the TDR feature. The distance is
+ * measured in centimeters and ranges from 0 to 3100 centimeters. If the
+ * distance is not available (0x3f), the function returns -ERANGE.
+ *
+ * Return: The distance to the main fault in centimeters, or -ERANGE if the
+ * resolution is not possible.
+ */
+int oa_1000bt1_get_tdr_distance(u16 reg_value)
+{
+ u8 dist_val = FIELD_GET(OA_1000BT1_HDD_TDR_DISTANCE_MASK, reg_value);
+
+ if (dist_val == OA_1000BT1_HDD_TDR_DISTANCE_RESOLUTION_NOT_POSSIBLE)
+ return -ERANGE;
+
+ return dist_val * 100;
+}
+EXPORT_SYMBOL_GPL(oa_1000bt1_get_tdr_distance);
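A sketch of how a PHY driver might feed both helpers from its .cable_test_get_status() implementation; the register location (FOO_HDD_TDR_REG) and the single-pair mapping are device-specific assumptions, not part of these helpers:

	static int foo_cable_test_get_status(struct phy_device *phydev,
					     bool *finished)
	{
		int ret, dist;

		ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, FOO_HDD_TDR_REG);
		if (ret < 0)
			return ret;

		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					oa_1000bt1_get_ethtool_cable_result_code(ret));

		dist = oa_1000bt1_get_tdr_distance(ret);
		if (dist >= 0)
			ethnl_cable_test_fault_length(phydev,
						      ETHTOOL_A_CABLE_PAIR_A,
						      dist);

		*finished = true;
		return 0;
	}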
diff --git a/drivers/net/phy/open_alliance_helpers.h b/drivers/net/phy/open_alliance_helpers.h
new file mode 100644
index 000000000000..8b7d97bc6f18
--- /dev/null
+++ b/drivers/net/phy/open_alliance_helpers.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef OPEN_ALLIANCE_HELPERS_H
+#define OPEN_ALLIANCE_HELPERS_H
+
+/*
+ * These defines reflect the TDR (Time Domain Reflectometry) diagnostic feature
+ * for 1000BASE-T1 automotive Ethernet PHYs as specified by the OPEN Alliance.
+ *
+ * The register values are part of the HDD.TDR register, which provides
+ * information about the cable status and faults. The exact register offset
+ * is device-specific and should be provided by the driver.
+ */
+#define OA_1000BT1_HDD_TDR_ACTIVATION_MASK GENMASK(1, 0)
+#define OA_1000BT1_HDD_TDR_ACTIVATION_OFF 1
+#define OA_1000BT1_HDD_TDR_ACTIVATION_ON 2
+
+#define OA_1000BT1_HDD_TDR_STATUS_MASK GENMASK(7, 4)
+#define OA_1000BT1_HDD_TDR_STATUS_SHORT 3
+#define OA_1000BT1_HDD_TDR_STATUS_OPEN 6
+#define OA_1000BT1_HDD_TDR_STATUS_NOISE 5
+#define OA_1000BT1_HDD_TDR_STATUS_CABLE_OK 7
+#define OA_1000BT1_HDD_TDR_STATUS_TEST_IN_PROGRESS 8
+#define OA_1000BT1_HDD_TDR_STATUS_TEST_NOT_POSSIBLE 13
+
+/*
+ * OA_1000BT1_HDD_TDR_DISTANCE_MASK:
+ * This mask is used to extract the distance to the first/main fault
+ * detected by the TDR feature. Each bit represents an approximate distance
+ * of 1 meter, ranging from 0 to 31 meters. The exact interpretation of the
+ * bits may vary, but generally:
+ * 000000 = no error
+ * 000001 = error about 0-1m away
+ * 000010 = error between 1-2m away
+ * ...
+ * 011111 = error about 30-31m away
+ * 111111 = resolution not possible / out of distance
+ */
+#define OA_1000BT1_HDD_TDR_DISTANCE_MASK GENMASK(13, 8)
+#define OA_1000BT1_HDD_TDR_DISTANCE_NO_ERROR 0
+#define OA_1000BT1_HDD_TDR_DISTANCE_RESOLUTION_NOT_POSSIBLE 0x3f
+
+int oa_1000bt1_get_ethtool_cable_result_code(u16 reg_value);
+int oa_1000bt1_get_tdr_distance(u16 reg_value);
+
+#endif /* OPEN_ALLIANCE_HELPERS_H */
+
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 785182fa5fe0..4f3e742907cb 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -342,14 +342,19 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
if (mdio_phy_id_is_c45(mii_data->phy_id)) {
prtad = mdio_phy_id_prtad(mii_data->phy_id);
devad = mdio_phy_id_devad(mii_data->phy_id);
- mii_data->val_out = mdiobus_c45_read(
- phydev->mdio.bus, prtad, devad,
- mii_data->reg_num);
+ ret = mdiobus_c45_read(phydev->mdio.bus, prtad, devad,
+ mii_data->reg_num);
+
} else {
- mii_data->val_out = mdiobus_read(
- phydev->mdio.bus, mii_data->phy_id,
- mii_data->reg_num);
+ ret = mdiobus_read(phydev->mdio.bus, mii_data->phy_id,
+ mii_data->reg_num);
}
+
+ if (ret < 0)
+ return ret;
+
+ mii_data->val_out = ret;
+
return 0;
case SIOCSMIIREG:
@@ -1089,7 +1094,10 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
+ if (autoneg == AUTONEG_ENABLE &&
+ (linkmode_empty(advertising) ||
+ !linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported)))
return -EINVAL;
if (autoneg == AUTONEG_DISABLE &&
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 7752e9386b40..499797646580 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -29,6 +29,7 @@
#include <linux/phy.h>
#include <linux/phylib_stubs.h>
#include <linux/phy_led_triggers.h>
+#include <linux/phy_link_topology.h>
#include <linux/pse-pd/pse.h>
#include <linux/property.h>
#include <linux/rtnetlink.h>
@@ -279,6 +280,15 @@ static struct phy_driver genphy_driver;
static LIST_HEAD(phy_fixup_list);
static DEFINE_MUTEX(phy_fixup_lock);
+static bool phy_drv_wol_enabled(struct phy_device *phydev)
+{
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ phy_ethtool_get_wol(phydev, &wol);
+
+ return wol.wolopts != 0;
+}
+
static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
{
struct device_driver *drv = phydev->mdio.dev.driver;
@@ -288,6 +298,12 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
if (!drv || !phydrv->suspend)
return false;
+	/* If the PHY on the MDIO bus is not attached but has WOL enabled
+ * we cannot suspend the PHY.
+ */
+ if (!netdev && phy_drv_wol_enabled(phydev))
+ return false;
+
/* PHY not attached? May suspend if the PHY has not already been
* suspended as part of a prior call to phy_disconnect() ->
* phy_detach() -> phy_suspend() because the parent netdev might be the
@@ -1370,6 +1386,48 @@ phy_standalone_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(phy_standalone);
/**
+ * phy_sfp_connect_phy - Connect the SFP module's PHY to the upstream PHY
+ * @upstream: pointer to the upstream phy device
+ * @phy: pointer to the SFP module's phy device
+ *
+ * This helper allows keeping track of PHY devices on the link. It adds the
+ * SFP module's phy to the phy namespace of the upstream phy
+ *
+ * Return: 0 on success, otherwise a negative error code.
+ */
+int phy_sfp_connect_phy(void *upstream, struct phy_device *phy)
+{
+ struct phy_device *phydev = upstream;
+ struct net_device *dev = phydev->attached_dev;
+
+ if (dev)
+ return phy_link_topo_add_phy(dev, phy, PHY_UPSTREAM_PHY, phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_sfp_connect_phy);
+
+/**
+ * phy_sfp_disconnect_phy - Disconnect the SFP module's PHY from the upstream PHY
+ * @upstream: pointer to the upstream phy device
+ * @phy: pointer to the SFP module's phy device
+ *
+ * This helper allows keeping track of PHY devices on the link. It removes the
+ * SFP module's phy from the phy namespace of the upstream phy. As the module phy
+ * will be destroyed, re-inserting the same module will add a new phy with a
+ * new index.
+ */
+void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy)
+{
+ struct phy_device *phydev = upstream;
+ struct net_device *dev = phydev->attached_dev;
+
+ if (dev)
+ phy_link_topo_del_phy(dev, phy);
+}
+EXPORT_SYMBOL(phy_sfp_disconnect_phy);
+
+/**
* phy_sfp_attach - attach the SFP bus to the PHY upstream network device
* @upstream: pointer to the phy device
* @bus: sfp bus representing cage being attached
@@ -1511,6 +1569,10 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (phydev->sfp_bus_attached)
dev->sfp_bus = phydev->sfp_bus;
+
+ err = phy_link_topo_add_phy(dev, phydev, PHY_UPSTREAM_MAC, dev);
+ if (err)
+ goto error;
}
/* Some Ethernet drivers try to connect to a PHY device before
@@ -1938,6 +2000,7 @@ void phy_detach(struct phy_device *phydev)
if (dev) {
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
+ phy_link_topo_del_phy(dev, phydev);
}
phydev->phylink = NULL;
@@ -1975,7 +2038,6 @@ EXPORT_SYMBOL(phy_detach);
int phy_suspend(struct phy_device *phydev)
{
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
struct net_device *netdev = phydev->attached_dev;
const struct phy_driver *phydrv = phydev->drv;
int ret;
@@ -1983,8 +2045,7 @@ int phy_suspend(struct phy_device *phydev)
if (phydev->suspended || !phydrv)
return 0;
- phy_ethtool_get_wol(phydev, &wol);
- phydev->wol_enabled = wol.wolopts ||
+ phydev->wol_enabled = phy_drv_wol_enabled(phydev) ||
(netdev && netdev->ethtool->wol_enabled);
/* If the device has WOL enabled, we cannot suspend the PHY */
if (phydev->wol_enabled && !(phydrv->flags & PHY_ALWAYS_CALL_SUSPEND))
@@ -2095,22 +2156,20 @@ EXPORT_SYMBOL(phy_reset_after_clk_enable);
/**
* genphy_config_advert - sanitize and advertise auto-negotiation parameters
* @phydev: target phy_device struct
+ * @advert: auto-negotiation parameters to advertise
*
* Description: Writes MII_ADVERTISE with the appropriate values,
* after sanitizing the values to make sure we only advertise
* what is supported. Returns < 0 on error, 0 if the PHY's advertisement
* hasn't changed, and > 0 if it has changed.
*/
-static int genphy_config_advert(struct phy_device *phydev)
+static int genphy_config_advert(struct phy_device *phydev,
+ const unsigned long *advert)
{
int err, bmsr, changed = 0;
u32 adv;
- /* Only allow advertising what this PHY supports */
- linkmode_and(phydev->advertising, phydev->advertising,
- phydev->supported);
-
- adv = linkmode_adv_to_mii_adv_t(phydev->advertising);
+ adv = linkmode_adv_to_mii_adv_t(advert);
/* Setup standard advertisement */
err = phy_modify_changed(phydev, MII_ADVERTISE,
@@ -2133,7 +2192,7 @@ static int genphy_config_advert(struct phy_device *phydev)
if (!(bmsr & BMSR_ESTATEN))
return changed;
- adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
+ adv = linkmode_adv_to_mii_ctrl1000_t(advert);
err = phy_modify_changed(phydev, MII_CTRL1000,
ADVERTISE_1000FULL | ADVERTISE_1000HALF,
@@ -2357,6 +2416,9 @@ EXPORT_SYMBOL(genphy_check_and_restart_aneg);
*/
int __genphy_config_aneg(struct phy_device *phydev, bool changed)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(fixed_advert);
+ const struct phy_setting *set;
+ unsigned long *advert;
int err;
err = genphy_c45_an_config_eee_aneg(phydev);
@@ -2371,10 +2433,25 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed)
else if (err)
changed = true;
- if (AUTONEG_ENABLE != phydev->autoneg)
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ /* Only allow advertising what this PHY supports */
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
+ advert = phydev->advertising;
+ } else if (phydev->speed < SPEED_1000) {
return genphy_setup_forced(phydev);
+ } else {
+ linkmode_zero(fixed_advert);
+
+ set = phy_lookup_setting(phydev->speed, phydev->duplex,
+ phydev->supported, true);
+ if (set)
+ linkmode_set_bit(set->bit, fixed_advert);
- err = genphy_config_advert(phydev);
+ advert = fixed_advert;
+ }
+
+ err = genphy_config_advert(phydev, advert);
if (err < 0) /* error */
return err;
else if (err)
@@ -3249,10 +3326,11 @@ static __maybe_unused int phy_led_hw_is_supported(struct led_classdev *led_cdev,
static void phy_leds_unregister(struct phy_device *phydev)
{
- struct phy_led *phyled;
+ struct phy_led *phyled, *tmp;
- list_for_each_entry(phyled, &phydev->leds, list) {
+ list_for_each_entry_safe(phyled, tmp, &phydev->leds, list) {
led_classdev_unregister(&phyled->led_cdev);
+ list_del(&phyled->list);
}
}
@@ -3330,7 +3408,7 @@ static int of_phy_led(struct phy_device *phydev,
static int of_phy_leds(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
- struct device_node *leds, *led;
+ struct device_node *leds;
int err;
if (!IS_ENABLED(CONFIG_OF_MDIO))
@@ -3343,15 +3421,16 @@ static int of_phy_leds(struct phy_device *phydev)
if (!leds)
return 0;
- for_each_available_child_of_node(leds, led) {
+ for_each_available_child_of_node_scoped(leds, led) {
err = of_phy_led(phydev, led);
if (err) {
- of_node_put(led);
+ of_node_put(leds);
phy_leds_unregister(phydev);
return err;
}
}
+ of_node_put(leds);
return 0;
}
diff --git a/drivers/net/phy/phy_link_topology.c b/drivers/net/phy/phy_link_topology.c
new file mode 100644
index 000000000000..4a5d73002a1a
--- /dev/null
+++ b/drivers/net/phy/phy_link_topology.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Infrastructure to handle all PHY devices connected to a given netdev,
+ * either directly or indirectly attached.
+ *
+ * Copyright (c) 2023 Maxime Chevallier<maxime.chevallier@bootlin.com>
+ */
+
+#include <linux/phy_link_topology.h>
+#include <linux/phy.h>
+#include <linux/rtnetlink.h>
+#include <linux/xarray.h>
+
+static int netdev_alloc_phy_link_topology(struct net_device *dev)
+{
+ struct phy_link_topology *topo;
+
+ topo = kzalloc(sizeof(*topo), GFP_KERNEL);
+ if (!topo)
+ return -ENOMEM;
+
+ xa_init_flags(&topo->phys, XA_FLAGS_ALLOC1);
+ topo->next_phy_index = 1;
+
+ dev->link_topo = topo;
+
+ return 0;
+}
+
+int phy_link_topo_add_phy(struct net_device *dev,
+ struct phy_device *phy,
+ enum phy_upstream upt, void *upstream)
+{
+ struct phy_link_topology *topo = dev->link_topo;
+ struct phy_device_node *pdn;
+ int ret;
+
+ if (!topo) {
+ ret = netdev_alloc_phy_link_topology(dev);
+ if (ret)
+ return ret;
+
+ topo = dev->link_topo;
+ }
+
+ pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
+ if (!pdn)
+ return -ENOMEM;
+
+ pdn->phy = phy;
+ switch (upt) {
+ case PHY_UPSTREAM_MAC:
+ pdn->upstream.netdev = (struct net_device *)upstream;
+ if (phy_on_sfp(phy))
+ pdn->parent_sfp_bus = pdn->upstream.netdev->sfp_bus;
+ break;
+ case PHY_UPSTREAM_PHY:
+ pdn->upstream.phydev = (struct phy_device *)upstream;
+ if (phy_on_sfp(phy))
+ pdn->parent_sfp_bus = pdn->upstream.phydev->sfp_bus;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ pdn->upstream_type = upt;
+
+ /* Attempt to re-use a previously allocated phy_index */
+ if (phy->phyindex)
+ ret = xa_insert(&topo->phys, phy->phyindex, pdn, GFP_KERNEL);
+ else
+ ret = xa_alloc_cyclic(&topo->phys, &phy->phyindex, pdn,
+ xa_limit_32b, &topo->next_phy_index,
+ GFP_KERNEL);
+
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(pdn);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_link_topo_add_phy);
+
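Because the topology is an xarray keyed by phyindex, enumerating all PHYs attached (directly or indirectly) to a netdev is straightforward; a minimal sketch (assumed debugging helper, not part of this patch):

	static void foo_dump_link_topo(struct net_device *dev)
	{
		struct phy_device_node *pdn;
		unsigned long index;

		if (!dev->link_topo)
			return;

		xa_for_each(&dev->link_topo->phys, index, pdn)
			netdev_info(dev, "phy %lu: %s\n", index,
				    phydev_name(pdn->phy));
	}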
+void phy_link_topo_del_phy(struct net_device *dev,
+ struct phy_device *phy)
+{
+ struct phy_link_topology *topo = dev->link_topo;
+ struct phy_device_node *pdn;
+
+ if (!topo)
+ return;
+
+ pdn = xa_erase(&topo->phys, phy->phyindex);
+
+	/* We delete the PHY from the topology, but do not reset the
+	 * phy->phyindex field. If the PHY isn't gone, we can re-assign it the
+	 * same index next time it's added back to the topology.
+ */
+
+ kfree(pdn);
+}
+EXPORT_SYMBOL_GPL(phy_link_topo_del_phy);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 51c526d227fa..4309317de3d1 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1636,6 +1636,48 @@ static int phylink_register_sfp(struct phylink *pl,
}
/**
+ * phylink_set_fixed_link() - set the fixed link
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @state: a pointer to a struct phylink_link_state.
+ *
+ * This function is used when the link parameters are known and do not change,
+ * making it suitable for links that are fixed by the board design, such as
+ * direct MAC-to-MAC connections.
+ *
+ * Returns: zero on success or negative error code.
+ */
+int phylink_set_fixed_link(struct phylink *pl,
+ const struct phylink_link_state *state)
+{
+ const struct phy_setting *s;
+ unsigned long *adv;
+
+ if (pl->cfg_link_an_mode != MLO_AN_PHY || !state ||
+ !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state))
+ return -EINVAL;
+
+ s = phy_lookup_setting(state->speed, state->duplex,
+ pl->supported, true);
+ if (!s)
+ return -EINVAL;
+
+ adv = pl->link_config.advertising;
+ linkmode_zero(adv);
+ linkmode_set_bit(s->bit, adv);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv);
+
+ pl->link_config.speed = state->speed;
+ pl->link_config.duplex = state->duplex;
+ pl->link_config.link = 1;
+ pl->link_config.an_complete = 1;
+
+ pl->cfg_link_an_mode = MLO_AN_FIXED;
+ pl->cur_link_an_mode = pl->cfg_link_an_mode;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(phylink_set_fixed_link);
+
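A minimal usage sketch (assumed caller; as the checks above require, phylink must still be stopped and configured for MLO_AN_PHY when this is called):

	struct phylink_link_state state = {
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
	};
	int err;

	err = phylink_set_fixed_link(pl, &state);
	if (err)
		dev_err(dev, "failed to set fixed link: %d\n", err);	/* dev assumed */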
+/**
* phylink_create() - create a phylink instance
* @config: a pointer to the target &struct phylink_config
* @fwnode: a pointer to a &struct fwnode_handle describing the network
@@ -3423,7 +3465,8 @@ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
return ret;
}
-static void phylink_sfp_disconnect_phy(void *upstream)
+static void phylink_sfp_disconnect_phy(void *upstream,
+ struct phy_device *phydev)
{
phylink_disconnect_phy(upstream);
}
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
index c8f83e5f78ab..105602581a03 100644
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -770,6 +770,8 @@ static const struct sfp_upstream_ops at8031_sfp_ops = {
.attach = phy_sfp_attach,
.detach = phy_sfp_detach,
.module_insert = at8031_sfp_insert,
+ .connect_phy = phy_sfp_connect_phy,
+ .disconnect_phy = phy_sfp_disconnect_phy,
};
static int at8031_parse_dt(struct phy_device *phydev)
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index 672c6929119a..bd8a51ec0ecd 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -699,6 +699,8 @@ static const struct sfp_upstream_ops qca807x_sfp_ops = {
.detach = phy_sfp_detach,
.module_insert = qca807x_sfp_insert,
.module_remove = qca807x_sfp_remove,
+ .connect_phy = phy_sfp_connect_phy,
+ .disconnect_phy = phy_sfp_disconnect_phy,
};
static int qca807x_probe(struct phy_device *phydev)
@@ -733,16 +735,6 @@ static int qca807x_probe(struct phy_device *phydev)
"qcom,dac-disable-bias-current-tweak");
#if IS_ENABLED(CONFIG_GPIOLIB)
- /* Make sure we don't have mixed leds node and gpio-controller
- * to prevent registering leds and having gpio-controller usage
- * conflicting with them.
- */
- if (of_find_property(node, "leds", NULL) &&
- of_find_property(node, "gpio-controller", NULL)) {
- phydev_err(phydev, "Invalid property detected. LEDs and gpio-controller are mutually exclusive.");
- return -EINVAL;
- }
-
/* Do not register a GPIO controller unless flagged for it */
if (of_property_read_bool(node, "gpio-controller")) {
ret = qca807x_gpio(phydev);
diff --git a/drivers/net/phy/qcom/qca83xx.c b/drivers/net/phy/qcom/qca83xx.c
index 5d083ef0250e..a05d0df6fa16 100644
--- a/drivers/net/phy/qcom/qca83xx.c
+++ b/drivers/net/phy/qcom/qca83xx.c
@@ -15,7 +15,6 @@
#define QCA8327_A_PHY_ID 0x004dd033
#define QCA8327_B_PHY_ID 0x004dd034
#define QCA8337_PHY_ID 0x004dd036
-#define QCA8K_PHY_ID_MASK 0xffffffff
#define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0)
@@ -216,8 +215,7 @@ static int qca8327_suspend(struct phy_device *phydev)
static struct phy_driver qca83xx_driver[] = {
{
/* QCA8337 */
- .phy_id = QCA8337_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
+ PHY_ID_MATCH_EXACT(QCA8337_PHY_ID),
.name = "Qualcomm Atheros 8337 internal PHY",
/* PHY_GBIT_FEATURES */
.probe = qca83xx_probe,
@@ -231,8 +229,7 @@ static struct phy_driver qca83xx_driver[] = {
.resume = qca83xx_resume,
}, {
/* QCA8327-A from switch QCA8327-AL1A */
- .phy_id = QCA8327_A_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
+ PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID),
.name = "Qualcomm Atheros 8327-A internal PHY",
/* PHY_GBIT_FEATURES */
.link_change_notify = qca83xx_link_change_notify,
@@ -247,8 +244,7 @@ static struct phy_driver qca83xx_driver[] = {
.resume = qca83xx_resume,
}, {
/* QCA8327-B from switch QCA8327-BL1A */
- .phy_id = QCA8327_B_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
+ PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID),
.name = "Qualcomm Atheros 8327-B internal PHY",
/* PHY_GBIT_FEATURES */
.link_change_notify = qca83xx_link_change_notify,
diff --git a/drivers/net/phy/qt2025.rs b/drivers/net/phy/qt2025.rs
new file mode 100644
index 000000000000..1ab065798175
--- /dev/null
+++ b/drivers/net/phy/qt2025.rs
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) Tehuti Networks Ltd.
+// Copyright (C) 2024 FUJITA Tomonori <fujita.tomonori@gmail.com>
+
+//! Applied Micro Circuits Corporation QT2025 PHY driver
+//!
+//! This driver is based on the vendor driver `QT2025_phy.c`. This source
+//! and firmware can be downloaded on the EN-9320SFP+ support site.
+//!
+//! The QT2025 PHY integrates an Intel 8051 micro-controller.
+
+use kernel::c_str;
+use kernel::error::code;
+use kernel::firmware::Firmware;
+use kernel::net::phy::{
+ self,
+ reg::{Mmd, C45},
+ Driver,
+};
+use kernel::prelude::*;
+use kernel::sizes::{SZ_16K, SZ_8K};
+
+kernel::module_phy_driver! {
+ drivers: [PhyQT2025],
+ device_table: [
+ phy::DeviceId::new_with_driver::<PhyQT2025>(),
+ ],
+ name: "qt2025_phy",
+ author: "FUJITA Tomonori <fujita.tomonori@gmail.com>",
+ description: "AMCC QT2025 PHY driver",
+ license: "GPL",
+ firmware: ["qt2025-2.0.3.3.fw"],
+}
+
+struct PhyQT2025;
+
+#[vtable]
+impl Driver for PhyQT2025 {
+    const NAME: &'static CStr = c_str!("QT2025 10Gbps SFP+");
+ const PHY_DEVICE_ID: phy::DeviceId = phy::DeviceId::new_with_exact_mask(0x0043a400);
+
+ fn probe(dev: &mut phy::Device) -> Result<()> {
+ // Check the hardware revision code.
+ // Only 0x3b works with this driver and firmware.
+ let hw_rev = dev.read(C45::new(Mmd::PMAPMD, 0xd001))?;
+        // Only revision 0xb3 works with this driver and firmware.
+ return Err(code::ENODEV);
+ }
+
+ // `MICRO_RESETN`: hold the micro-controller in reset while configuring.
+ dev.write(C45::new(Mmd::PMAPMD, 0xc300), 0x0000)?;
+ // `SREFCLK_FREQ`: configure clock frequency of the micro-controller.
+ dev.write(C45::new(Mmd::PMAPMD, 0xc302), 0x0004)?;
+ // Non loopback mode.
+ dev.write(C45::new(Mmd::PMAPMD, 0xc319), 0x0038)?;
+ // `CUS_LAN_WAN_CONFIG`: select between LAN and WAN (WIS) mode.
+ dev.write(C45::new(Mmd::PMAPMD, 0xc31a), 0x0098)?;
+ // The following writes use standardized registers (3.38 through
+ // 3.41 5/10/25GBASE-R PCS test pattern seed B) for something else.
+ // We don't know what.
+ dev.write(C45::new(Mmd::PCS, 0x0026), 0x0e00)?;
+ dev.write(C45::new(Mmd::PCS, 0x0027), 0x0893)?;
+ dev.write(C45::new(Mmd::PCS, 0x0028), 0xa528)?;
+ dev.write(C45::new(Mmd::PCS, 0x0029), 0x0003)?;
+ // Configure transmit and recovered clock.
+ dev.write(C45::new(Mmd::PMAPMD, 0xa30a), 0x06e1)?;
+ // `MICRO_RESETN`: release the micro-controller from the reset state.
+ dev.write(C45::new(Mmd::PMAPMD, 0xc300), 0x0002)?;
+ // The micro-controller will start running from the boot ROM.
+ dev.write(C45::new(Mmd::PCS, 0xe854), 0x00c0)?;
+
+ let fw = Firmware::request(c_str!("qt2025-2.0.3.3.fw"), dev.as_ref())?;
+ if fw.data().len() > SZ_16K + SZ_8K {
+ return Err(code::EFBIG);
+ }
+
+ // The 24kB of program memory space is accessible by MDIO.
+ // The first 16kB of memory is located in the address range 3.8000h - 3.BFFFh.
+ // The next 8kB of memory is located at 4.8000h - 4.9FFFh.
+ let mut dst_offset = 0;
+ let mut dst_mmd = Mmd::PCS;
+ for (src_idx, val) in fw.data().iter().enumerate() {
+ if src_idx == SZ_16K {
+ // Start writing to the next register with no offset
+ dst_offset = 0;
+ dst_mmd = Mmd::PHYXS;
+ }
+
+ dev.write(C45::new(dst_mmd, 0x8000 + dst_offset), (*val).into())?;
+
+ dst_offset += 1;
+ }
+ // The micro-controller will start running from SRAM.
+ dev.write(C45::new(Mmd::PCS, 0xe854), 0x0040)?;
+
+ // TODO: sleep here until the hw becomes ready.
+ Ok(())
+ }
+
+ fn read_status(dev: &mut phy::Device) -> Result<u16> {
+ dev.genphy_read_status::<C45>()
+ }
+}
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 87865918dab6..166f6a728373 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -527,6 +527,9 @@ static int rtl8211f_led_hw_control_get(struct phy_device *phydev, u8 index,
{
int val;
+ if (index >= RTL8211F_LED_COUNT)
+ return -EINVAL;
+
val = phy_read_paged(phydev, 0xd04, RTL8211F_LEDCR);
if (val < 0)
return val;
@@ -555,7 +558,7 @@ static int rtl8211f_led_hw_control_set(struct phy_device *phydev, u8 index,
unsigned long rules)
{
const u16 mask = RTL8211F_LEDCR_MASK << (RTL8211F_LEDCR_SHIFT * index);
- u16 reg = RTL8211F_LEDCR_MODE; /* Mode B */
+ u16 reg = 0;
if (index >= RTL8211F_LED_COUNT)
return -EINVAL;
@@ -575,6 +578,7 @@ static int rtl8211f_led_hw_control_set(struct phy_device *phydev, u8 index,
}
reg <<= RTL8211F_LEDCR_SHIFT * index;
+ reg |= RTL8211F_LEDCR_MODE; /* Mode B */
return phy_modify_paged(phydev, 0xd04, RTL8211F_LEDCR, mask, reg);
}
@@ -1077,6 +1081,16 @@ static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev)
return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, true);
}
+static int rtl8251b_c22_match_phy_device(struct phy_device *phydev)
+{
+ return rtlgen_is_c45_match(phydev, RTL_8251B, false);
+}
+
+static int rtl8251b_c45_match_phy_device(struct phy_device *phydev)
+{
+ return rtlgen_is_c45_match(phydev, RTL_8251B, true);
+}
+
static int rtlgen_resume(struct phy_device *phydev)
{
int ret = genphy_resume(phydev);
@@ -1414,7 +1428,7 @@ static struct phy_driver realtek_drvs[] = {
.suspend = genphy_c45_pma_suspend,
.resume = rtlgen_c45_resume,
}, {
- PHY_ID_MATCH_EXACT(0x001cc862),
+ .match_phy_device = rtl8251b_c45_match_phy_device,
.name = "RTL8251B 5Gbps PHY",
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
@@ -1424,6 +1438,18 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
+ .match_phy_device = rtl8251b_c22_match_phy_device,
+ .name = "RTL8126A-internal 5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ .read_mmd = rtl822x_read_mmd,
+ .write_mmd = rtl822x_write_mmd,
+ }, {
PHY_ID_MATCH_EXACT(0x001ccad0),
.name = "RTL8224 2.5Gbps PHY",
.get_features = rtl822x_c45_get_features,
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 2f44fc51848f..f13c00b5b449 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -487,7 +487,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
bus->socket_ops->stop(bus->sfp);
bus->socket_ops->detach(bus->sfp);
if (bus->phydev && ops && ops->disconnect_phy)
- ops->disconnect_phy(bus->upstream);
+ ops->disconnect_phy(bus->upstream, bus->phydev);
}
bus->registered = false;
}
@@ -722,6 +722,28 @@ void sfp_bus_del_upstream(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_bus_del_upstream);
+/**
+ * sfp_get_name() - Get the SFP device name
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ *
+ * Gets the SFP device's name, if @bus has a registered socket. Callers must
+ * hold RTNL, and the returned name is only valid until RTNL is released.
+ *
+ * Returns:
+ * - The name of the SFP device registered with sfp_register_socket()
+ * - %NULL if no device was registered on @bus
+ */
+const char *sfp_get_name(struct sfp_bus *bus)
+{
+ ASSERT_RTNL();
+
+ if (bus->sfp_dev)
+ return dev_name(bus->sfp_dev);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(sfp_get_name);
+
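Since the returned pointer is only valid under RTNL, a consumer copies it out while holding the lock; a minimal sketch (assumed use in an ethtool get_drvinfo-style path, with info a struct ethtool_drvinfo):

	const char *name;

	rtnl_lock();
	name = sfp_get_name(dev->sfp_bus);
	if (name)
		strscpy(info->bus_info, name, sizeof(info->bus_info));
	rtnl_unlock();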
/* Socket driver entry points */
int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev)
{
@@ -743,7 +765,7 @@ void sfp_remove_phy(struct sfp_bus *bus)
const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
if (ops && ops->disconnect_phy)
- ops->disconnect_phy(bus->upstream);
+ ops->disconnect_phy(bus->upstream, bus->phydev);
bus->phydev = NULL;
}
EXPORT_SYMBOL_GPL(sfp_remove_phy);
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 897b979ec03c..2377179de017 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -10,8 +10,10 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/bitfield.h>
/* Vitesse Extended Page Magic Register(s) */
+#define MII_VSC73XX_EXT_PAGE_1E 0x01
#define MII_VSC82X4_EXT_PAGE_16E 0x10
#define MII_VSC82X4_EXT_PAGE_17E 0x11
#define MII_VSC82X4_EXT_PAGE_18E 0x12
@@ -60,6 +62,28 @@
/* Vitesse Extended Page Access Register */
#define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f
+/* Vitesse VSC73XX Extended Control Register */
+#define MII_VSC73XX_PHY_CTRL_EXT3 0x14
+
+#define MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_EN BIT(4)
+#define MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_CNT GENMASK(3, 2)
+#define MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_STA BIT(1)
+#define MII_VSC73XX_DOWNSHIFT_MAX 5
+#define MII_VSC73XX_DOWNSHIFT_INVAL 1
+
+/* VSC73XX PHY_BYPASS_CTRL register */
+#define MII_VSC73XX_PHY_BYPASS_CTRL MII_DCOUNTER
+#define MII_VSC73XX_PBC_TX_DIS BIT(15)
+#define MII_VSC73XX_PBC_FOR_SPD_AUTO_MDIX_DIS BIT(7)
+#define MII_VSC73XX_PBC_PAIR_SWAP_DIS BIT(5)
+#define MII_VSC73XX_PBC_POL_INV_DIS BIT(4)
+#define MII_VSC73XX_PBC_PARALLEL_DET_DIS BIT(3)
+#define MII_VSC73XX_PBC_AUTO_NP_EXCHANGE_DIS BIT(1)
+
+/* VSC73XX PHY_AUX_CTRL_STAT register */
+#define MII_VSC73XX_PHY_AUX_CTRL_STAT MII_NCONFIG
+#define MII_VSC73XX_PACS_NO_MDI_X_IND BIT(13)
+
/* Vitesse VSC8601 Extended PHY Control Register 1 */
#define MII_VSC8601_EPHY_CTL 0x17
#define MII_VSC8601_EPHY_CTL_RGMII_SKEW (1 << 8)
@@ -128,6 +152,74 @@ static int vsc73xx_write_page(struct phy_device *phydev, int page)
return __phy_write(phydev, VSC73XX_EXT_PAGE_ACCESS, page);
}
+static int vsc73xx_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, enable, cnt;
+
+ val = phy_read_paged(phydev, MII_VSC73XX_EXT_PAGE_1E,
+ MII_VSC73XX_PHY_CTRL_EXT3);
+ if (val < 0)
+ return val;
+
+ enable = FIELD_GET(MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_EN, val);
+ cnt = FIELD_GET(MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_CNT, val) + 2;
+
+ *data = enable ? cnt : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int vsc73xx_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ u16 mask, val;
+ int ret;
+
+ if (cnt > MII_VSC73XX_DOWNSHIFT_MAX)
+ return -E2BIG;
+ else if (cnt == MII_VSC73XX_DOWNSHIFT_INVAL)
+ return -EINVAL;
+
+ mask = MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_EN;
+
+ if (!cnt) {
+ val = 0;
+ } else {
+ mask |= MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_CNT;
+ val = MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_EN |
+ FIELD_PREP(MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_CNT,
+ cnt - 2);
+ }
+
+ ret = phy_modify_paged(phydev, MII_VSC73XX_EXT_PAGE_1E,
+ MII_VSC73XX_PHY_CTRL_EXT3, mask, val);
+ if (ret < 0)
+ return ret;
+
+ return genphy_soft_reset(phydev);
+}
+
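The hardware field stores the requested count minus two, so only 0 (disabled) and counts 2..5 are representable; a count of 1 is rejected with -EINVAL and anything above 5 with -E2BIG. From userspace the tunable maps onto the standard ethtool knob (interface name assumed):

	# ethtool --set-phy-tunable lan1 downshift on count 4
	# ethtool --get-phy-tunable lan1 downshift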
+static int vsc73xx_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return vsc73xx_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int vsc73xx_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return vsc73xx_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static void vsc73xx_config_init(struct phy_device *phydev)
{
/* Receiver init */
@@ -137,6 +229,12 @@ static void vsc73xx_config_init(struct phy_device *phydev)
/* Config LEDs 0x61 */
phy_modify(phydev, MII_TPISTATUS, 0xff00, 0x0061);
+
+ /* Enable downshift by default */
+ vsc73xx_set_downshift(phydev, MII_VSC73XX_DOWNSHIFT_MAX);
+
+ /* Set Auto MDI-X by default */
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
}
static int vsc738x_config_init(struct phy_device *phydev)
@@ -237,16 +335,75 @@ static int vsc739x_config_init(struct phy_device *phydev)
return 0;
}
+static int vsc73xx_mdix_set(struct phy_device *phydev, u8 mdix)
+{
+ int ret;
+ u16 val;
+
+ val = phy_read(phydev, MII_VSC73XX_PHY_BYPASS_CTRL);
+
+ switch (mdix) {
+ case ETH_TP_MDI:
+ val |= MII_VSC73XX_PBC_FOR_SPD_AUTO_MDIX_DIS |
+ MII_VSC73XX_PBC_PAIR_SWAP_DIS |
+ MII_VSC73XX_PBC_POL_INV_DIS;
+ break;
+ case ETH_TP_MDI_X:
+		/* When MDI-X auto configuration is disabled, only MDI mode
+		 * can be forced. Use auto-configuration to implement forced
+		 * MDI-X mode.
+ */
+ case ETH_TP_MDI_AUTO:
+ val &= ~(MII_VSC73XX_PBC_FOR_SPD_AUTO_MDIX_DIS |
+ MII_VSC73XX_PBC_PAIR_SWAP_DIS |
+ MII_VSC73XX_PBC_POL_INV_DIS);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = phy_write(phydev, MII_VSC73XX_PHY_BYPASS_CTRL, val);
+ if (ret)
+ return ret;
+
+ return genphy_restart_aneg(phydev);
+}
+
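With this hook wired into config_aneg below, the standard ethtool MDI-X control applies; note that forcing MDI-X maps onto auto-configuration, per the comment above, so only forced MDI is a true forced mode (interface name assumed):

	# ethtool -s lan1 mdix off	# force MDI
	# ethtool -s lan1 mdix auto	# auto MDI/MDI-X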
static int vsc73xx_config_aneg(struct phy_device *phydev)
{
- /* The VSC73xx switches does not like to be instructed to
- * do autonegotiation in any way, it prefers that you just go
- * with the power-on/reset defaults. Writing some registers will
- * just make autonegotiation permanently fail.
- */
+ int ret;
+
+ ret = vsc73xx_mdix_set(phydev, phydev->mdix_ctrl);
+ if (ret)
+ return ret;
+
+ return genphy_config_aneg(phydev);
+}
+
+static int vsc73xx_mdix_get(struct phy_device *phydev, u8 *mdix)
+{
+ u16 reg_val;
+
+ reg_val = phy_read(phydev, MII_VSC73XX_PHY_AUX_CTRL_STAT);
+ if (reg_val & MII_VSC73XX_PACS_NO_MDI_X_IND)
+ *mdix = ETH_TP_MDI;
+ else
+ *mdix = ETH_TP_MDI_X;
+
return 0;
}
+static int vsc73xx_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = vsc73xx_mdix_get(phydev, &phydev->mdix);
+ if (ret < 0)
+ return ret;
+
+ return genphy_read_status(phydev);
+}
+
/* This adds a skew for both TX and RX clocks, so the skew should only be
* applied to "rgmii-id" interfaces. It may not work as expected
* on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces.
@@ -445,8 +602,11 @@ static struct phy_driver vsc82xx_driver[] = {
/* PHY_GBIT_FEATURES */
.config_init = vsc738x_config_init,
.config_aneg = vsc73xx_config_aneg,
+ .read_status = vsc73xx_read_status,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
+ .get_tunable = vsc73xx_get_tunable,
+ .set_tunable = vsc73xx_set_tunable,
}, {
.phy_id = PHY_ID_VSC7388,
.name = "Vitesse VSC7388",
@@ -454,8 +614,11 @@ static struct phy_driver vsc82xx_driver[] = {
/* PHY_GBIT_FEATURES */
.config_init = vsc738x_config_init,
.config_aneg = vsc73xx_config_aneg,
+ .read_status = vsc73xx_read_status,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
+ .get_tunable = vsc73xx_get_tunable,
+ .set_tunable = vsc73xx_set_tunable,
}, {
.phy_id = PHY_ID_VSC7395,
.name = "Vitesse VSC7395",
@@ -463,8 +626,11 @@ static struct phy_driver vsc82xx_driver[] = {
/* PHY_GBIT_FEATURES */
.config_init = vsc739x_config_init,
.config_aneg = vsc73xx_config_aneg,
+ .read_status = vsc73xx_read_status,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
+ .get_tunable = vsc73xx_get_tunable,
+ .set_tunable = vsc73xx_set_tunable,
}, {
.phy_id = PHY_ID_VSC7398,
.name = "Vitesse VSC7398",
@@ -472,8 +638,11 @@ static struct phy_driver vsc82xx_driver[] = {
/* PHY_GBIT_FEATURES */
.config_init = vsc739x_config_init,
.config_aneg = vsc73xx_config_aneg,
+ .read_status = vsc73xx_read_status,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
+ .get_tunable = vsc73xx_get_tunable,
+ .set_tunable = vsc73xx_set_tunable,
}, {
.phy_id = PHY_ID_VSC8662,
.name = "Vitesse VSC8662",
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index c33c3db3cc08..c97406c6004d 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -29,7 +29,7 @@
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/uaccess.h>
#include <asm/string.h>
@@ -542,7 +542,7 @@ ppp_async_encode(struct asyncppp *ap)
* and 7 (code-reject) must be sent as though no options
* had been negotiated.
*/
- islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
+ islcp = proto == PPP_LCP && count >= 3 && 1 <= data[2] && data[2] <= 7;
if (i == 0) {
if (islcp)
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index 4d2ff63f2ee2..d93aeacc0dba 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -16,7 +16,7 @@
#include <linux/ppp-comp.h>
#include <linux/zlib.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/*
* State for a Deflate (de)compressor.
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index eb9acfcaeb09..4583e15ad03a 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -44,7 +44,7 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/file.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/slhc_vj.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
@@ -1631,7 +1631,7 @@ static void ppp_setup(struct net_device *dev)
dev->netdev_ops = &ppp_netdev_ops;
SET_NETDEV_DEVTYPE(dev, &ppp_type);
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
dev->hard_header_len = PPP_HDRLEN;
dev->mtu = PPP_MRU;
@@ -2269,7 +2269,7 @@ static bool ppp_channel_bridge_input(struct channel *pch, struct sk_buff *skb)
if (!pchb)
goto out_rcu;
- spin_lock(&pchb->downl);
+ spin_lock_bh(&pchb->downl);
if (!pchb->chan) {
/* channel got unregistered */
kfree_skb(skb);
@@ -2281,7 +2281,7 @@ static bool ppp_channel_bridge_input(struct channel *pch, struct sk_buff *skb)
kfree_skb(skb);
outl:
- spin_unlock(&pchb->downl);
+ spin_unlock_bh(&pchb->downl);
out_rcu:
rcu_read_unlock();
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 208f6e24f37c..bcc1eaedf58f 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -56,7 +56,7 @@
#include <linux/ppp_defs.h>
#include <linux/ppp-comp.h>
#include <linux/scatterlist.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "ppp_mppe.h"
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 45bf59ac8f57..644e99fc3623 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -43,7 +43,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/refcount.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/uaccess.h>
#define PPP_VERSION "2.4.2"
diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
index ec20953e0f82..f8e6854781e6 100644
--- a/drivers/net/pse-pd/pse_core.c
+++ b/drivers/net/pse-pd/pse_core.c
@@ -401,9 +401,14 @@ devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev,
rdesc->ops = &pse_pi_ops;
rdesc->owner = pcdev->owner;
- rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_CURRENT;
- rinit_data->constraints.max_uA = MAX_PI_CURRENT;
+ rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
+
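+	/* Only advertise current-limit control when the PSE controller
+	 * driver implements pi_set_current_limit; otherwise the regulator
+	 * core would accept requests this driver cannot honour.
+	 */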
+ if (pcdev->ops->pi_set_current_limit) {
+ rinit_data->constraints.valid_ops_mask |=
+ REGULATOR_CHANGE_CURRENT;
+ rinit_data->constraints.max_uA = MAX_PI_CURRENT;
+ }
+
rinit_data->supply_regulator = "vpwr";
rconfig.dev = pcdev->dev;
@@ -780,6 +785,17 @@ static int pse_ethtool_c33_set_config(struct pse_control *psec,
*/
switch (config->c33_admin_control) {
case ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED:
+ /* We could have a mismatch between admin_state_enabled and the
+ * state reported by regulator_is_enabled. This can occur when
+ * the PI is forcibly turned off by the controller. Call
+ * regulator_disable in that case to fix the counter state.
+ */
+ if (psec->pcdev->pi[psec->id].admin_state_enabled &&
+ !regulator_is_enabled(psec->ps)) {
+ err = regulator_disable(psec->ps);
+ if (err)
+ break;
+ }
if (!psec->pcdev->pi[psec->id].admin_state_enabled)
err = regulator_enable(psec->ps);
break;
diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c
index 2ea75686a319..5c4e88be46ee 100644
--- a/drivers/net/pse-pd/tps23881.c
+++ b/drivers/net/pse-pd/tps23881.c
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -737,6 +738,7 @@ static int tps23881_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct tps23881_priv *priv;
+ struct gpio_desc *reset;
int ret;
u8 val;
@@ -749,6 +751,25 @@ static int tps23881_i2c_probe(struct i2c_client *client)
if (!priv)
return -ENOMEM;
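+	/* Request the optional reset line driven to its active state, so
+	 * the controller is held in reset while we shape the pulse below.
+	 */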
+ reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset))
+ return dev_err_probe(&client->dev, PTR_ERR(reset), "Failed to get reset GPIO\n");
+
+ if (reset) {
+ /* TPS23880 datasheet (Rev G) indicates minimum reset pulse is 5us */
+ usleep_range(5, 10);
+ gpiod_set_value_cansleep(reset, 0); /* De-assert reset */
+
+ /* TPS23880 datasheet indicates the minimum time after power on reset
+ * should be 20ms, but the document describing how to load SRAM ("How
+ * to Load TPS2388x SRAM and Parity Code over I2C" (Rev E))
+ * indicates we should delay that programming by at least 50ms. So
+ * we'll wait the entire 50ms here to ensure we're safe to go to the
+ * SRAM loading procedure.
+ */
+ msleep(50);
+ }
+
ret = i2c_smbus_read_byte_data(client, TPS23881_REG_DEVID);
if (ret < 0)
return ret;
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 4eececc94513..318a0ef1af50 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -515,7 +515,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
/* MTU range: 68 - 4082 */
ndev->min_mtu = ETH_MIN_MTU;
ndev->max_mtu = RIONET_MAX_MTU;
- ndev->features = NETIF_F_LLTX;
+ ndev->lltx = true;
SET_NETDEV_DEV(ndev, &mport->dev);
ndev->ethtool_ops = &rionet_ethtool_ops;
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index 18df7ca66198..ee9fd3a94b96 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -77,7 +77,7 @@
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <net/checksum.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
static unsigned char *encode(unsigned char *cp, unsigned short n);
static long decode(unsigned char **cpp);
@@ -643,46 +643,57 @@ bad:
int
slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
{
- struct cstate *cs;
- unsigned ihl;
-
+ const struct tcphdr *th;
unsigned char index;
+ struct iphdr *iph;
+ struct cstate *cs;
+ unsigned int ihl;
- if(isize < 20) {
- /* The packet is shorter than a legal IP header */
+ /* The packet is shorter than a legal IP header.
+ * Also make sure isize is positive.
+ */
+ if (isize < (int)sizeof(struct iphdr)) {
+runt:
comp->sls_i_runt++;
- return slhc_toss( comp );
+ return slhc_toss(comp);
}
+ iph = (struct iphdr *)icp;
/* Peek at the IP header's IHL field to find its length */
- ihl = icp[0] & 0xf;
- if(ihl < 20 / 4){
- /* The IP header length field is too small */
- comp->sls_i_runt++;
- return slhc_toss( comp );
- }
- index = icp[9];
- icp[9] = IPPROTO_TCP;
+ ihl = iph->ihl;
+ /* The IP header length field is too small,
+ * or the packet is shorter than the IP header followed
+ * by a minimal TCP header.
+ */
+ if (ihl < 5 || isize < ihl * 4 + sizeof(struct tcphdr))
+ goto runt;
+
+ index = iph->protocol;
+ iph->protocol = IPPROTO_TCP;
if (ip_fast_csum(icp, ihl)) {
/* Bad IP header checksum; discard */
comp->sls_i_badcheck++;
- return slhc_toss( comp );
+ return slhc_toss(comp);
}
- if(index > comp->rslot_limit) {
+ if (index > comp->rslot_limit) {
comp->sls_i_error++;
return slhc_toss(comp);
}
-
+ th = (struct tcphdr *)(icp + ihl * 4);
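+	/* Reject TCP headers whose data offset is below the minimum or
+	 * would run past the end of the received data.
+	 */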
+ if (th->doff < sizeof(struct tcphdr) / 4)
+ goto runt;
+ if (isize < ihl * 4 + th->doff * 4)
+ goto runt;
/* Update local state */
cs = &comp->rstate[comp->recv_current = index];
comp->flags &=~ SLF_TOSS;
- memcpy(&cs->cs_ip,icp,20);
- memcpy(&cs->cs_tcp,icp + ihl*4,20);
+ memcpy(&cs->cs_ip, iph, sizeof(*iph));
+ memcpy(&cs->cs_tcp, th, sizeof(*th));
if (ihl > 5)
- memcpy(cs->cs_ipopt, icp + sizeof(struct iphdr), (ihl - 5) * 4);
- if (cs->cs_tcp.doff > 5)
- memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
- cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
+ memcpy(cs->cs_ipopt, &iph[1], (ihl - 5) * 4);
+ if (th->doff > 5)
+ memcpy(cs->cs_tcpopt, &th[1], (th->doff - 5) * 4);
+ cs->cs_hsize = ihl*2 + th->doff*2;
cs->initialized = true;
/* Put headers back on packet
* Neither header checksum is recalculated
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index d591e33268e5..55aa8d0c8e1f 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -893,7 +893,7 @@ static const struct mii_phy_ops bcm5201_phy_ops = {
.read_link = genmii_read_link,
};
-static struct mii_phy_def bcm5201_phy_def = {
+static const struct mii_phy_def bcm5201_phy_def = {
.phy_id = 0x00406210,
.phy_id_mask = 0xfffffff0,
.name = "BCM5201",
@@ -912,7 +912,7 @@ static const struct mii_phy_ops bcm5221_phy_ops = {
.read_link = genmii_read_link,
};
-static struct mii_phy_def bcm5221_phy_def = {
+static const struct mii_phy_def bcm5221_phy_def = {
.phy_id = 0x004061e0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5221",
@@ -930,7 +930,8 @@ static const struct mii_phy_ops bcm5241_phy_ops = {
.poll_link = genmii_poll_link,
.read_link = genmii_read_link,
};
-static struct mii_phy_def bcm5241_phy_def = {
+
+static const struct mii_phy_def bcm5241_phy_def = {
.phy_id = 0x0143bc30,
.phy_id_mask = 0xfffffff0,
.name = "BCM5241",
@@ -949,7 +950,7 @@ static const struct mii_phy_ops bcm5400_phy_ops = {
.read_link = bcm54xx_read_link,
};
-static struct mii_phy_def bcm5400_phy_def = {
+static const struct mii_phy_def bcm5400_phy_def = {
.phy_id = 0x00206040,
.phy_id_mask = 0xfffffff0,
.name = "BCM5400",
@@ -968,7 +969,7 @@ static const struct mii_phy_ops bcm5401_phy_ops = {
.read_link = bcm54xx_read_link,
};
-static struct mii_phy_def bcm5401_phy_def = {
+static const struct mii_phy_def bcm5401_phy_def = {
.phy_id = 0x00206050,
.phy_id_mask = 0xfffffff0,
.name = "BCM5401",
@@ -987,7 +988,7 @@ static const struct mii_phy_ops bcm5411_phy_ops = {
.read_link = bcm54xx_read_link,
};
-static struct mii_phy_def bcm5411_phy_def = {
+static const struct mii_phy_def bcm5411_phy_def = {
.phy_id = 0x00206070,
.phy_id_mask = 0xfffffff0,
.name = "BCM5411",
@@ -1007,7 +1008,7 @@ static const struct mii_phy_ops bcm5421_phy_ops = {
.enable_fiber = bcm5421_enable_fiber,
};
-static struct mii_phy_def bcm5421_phy_def = {
+static const struct mii_phy_def bcm5421_phy_def = {
.phy_id = 0x002060e0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5421",
@@ -1026,7 +1027,7 @@ static const struct mii_phy_ops bcm5421k2_phy_ops = {
.read_link = bcm54xx_read_link,
};
-static struct mii_phy_def bcm5421k2_phy_def = {
+static const struct mii_phy_def bcm5421k2_phy_def = {
.phy_id = 0x002062e0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5421-K2",
@@ -1045,7 +1046,7 @@ static const struct mii_phy_ops bcm5461_phy_ops = {
.enable_fiber = bcm5461_enable_fiber,
};
-static struct mii_phy_def bcm5461_phy_def = {
+static const struct mii_phy_def bcm5461_phy_def = {
.phy_id = 0x002060c0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5461",
@@ -1064,7 +1065,7 @@ static const struct mii_phy_ops bcm5462V_phy_ops = {
.read_link = bcm54xx_read_link,
};
-static struct mii_phy_def bcm5462V_phy_def = {
+static const struct mii_phy_def bcm5462V_phy_def = {
.phy_id = 0x002060d0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5462-Vesta",
@@ -1094,7 +1095,7 @@ static const struct mii_phy_ops marvell88e1111_phy_ops = {
/* two revs in darwin for the 88e1101 ... I could use a datasheet
* to get the proper names...
*/
-static struct mii_phy_def marvell88e1101v1_phy_def = {
+static const struct mii_phy_def marvell88e1101v1_phy_def = {
.phy_id = 0x01410c20,
.phy_id_mask = 0xfffffff0,
.name = "Marvell 88E1101v1",
@@ -1102,7 +1103,8 @@ static struct mii_phy_def marvell88e1101v1_phy_def = {
.magic_aneg = 1,
.ops = &marvell88e1101_phy_ops
};
-static struct mii_phy_def marvell88e1101v2_phy_def = {
+
+static const struct mii_phy_def marvell88e1101v2_phy_def = {
.phy_id = 0x01410c60,
.phy_id_mask = 0xfffffff0,
.name = "Marvell 88E1101v2",
@@ -1110,7 +1112,8 @@ static struct mii_phy_def marvell88e1101v2_phy_def = {
.magic_aneg = 1,
.ops = &marvell88e1101_phy_ops
};
-static struct mii_phy_def marvell88e1111_phy_def = {
+
+static const struct mii_phy_def marvell88e1111_phy_def = {
.phy_id = 0x01410cc0,
.phy_id_mask = 0xfffffff0,
.name = "Marvell 88E1111",
@@ -1127,7 +1130,7 @@ static const struct mii_phy_ops generic_phy_ops = {
.read_link = genmii_read_link
};
-static struct mii_phy_def genmii_phy_def = {
+static const struct mii_phy_def genmii_phy_def = {
.phy_id = 0x00000000,
.phy_id_mask = 0x00000000,
.name = "Generic MII",
@@ -1136,7 +1139,7 @@ static struct mii_phy_def genmii_phy_def = {
.ops = &generic_phy_ops
};
-static struct mii_phy_def* mii_phy_table[] = {
+static const struct mii_phy_def *mii_phy_table[] = {
&bcm5201_phy_def,
&bcm5221_phy_def,
&bcm5241_phy_def,
@@ -1156,9 +1159,9 @@ static struct mii_phy_def* mii_phy_table[] = {
int sungem_phy_probe(struct mii_phy *phy, int mii_id)
{
+ const struct mii_phy_def *def;
int rc;
u32 id;
- struct mii_phy_def* def;
int i;
/* We do not reset the mii_phy structure as the driver
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 77574f7a3bd4..5aa41d5f7765 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -1162,7 +1162,6 @@ static const struct file_operations tap_fops = {
.read_iter = tap_read_iter,
.write_iter = tap_write_iter,
.poll = tap_poll,
- .llseek = no_llseek,
.unlocked_ioctl = tap_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index ab1935a4aa2c..18191d5a8bd4 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -2189,12 +2189,12 @@ static void team_setup(struct net_device *dev)
* Let this up to underlay drivers.
*/
dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
-
- dev->features |= NETIF_F_LLTX;
- dev->features |= NETIF_F_GRO;
+ dev->lltx = true;
/* Don't allow team devices to change network namespaces. */
- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->netns_local = true;
+
+ dev->features |= NETIF_F_GRO;
dev->hw_features = TEAM_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_RX |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 1d06c560c5e6..9a0f6eb32016 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -990,10 +990,11 @@ static int tun_net_init(struct net_device *dev)
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
- dev->features = dev->hw_features | NETIF_F_LLTX;
+ dev->features = dev->hw_features;
dev->vlan_features = dev->features &
~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
+ dev->lltx = true;
tun->flags = (tun->flags & ~TUN_FEATURES) |
(ifr->ifr_flags & TUN_FEATURES);
@@ -1129,7 +1130,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
}
- /* NETIF_F_LLTX requires to do our own update of trans_start */
+ /* dev->lltx requires us to do our own update of trans_start */
queue = netdev_get_tx_queue(dev, txq);
txq_trans_cond_update(queue);
@@ -3451,6 +3452,12 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
struct tun_file *tfile = file->private_data;
int ret;
+ if (on) {
+ ret = file_f_owner_allocate(file);
+ if (ret)
+ goto out;
+ }
+
if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
goto out;
@@ -3536,7 +3543,6 @@ static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
static const struct file_operations tun_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read_iter = tun_chr_read_iter,
.write_iter = tun_chr_write_iter,
.poll = tun_chr_poll,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 6d61052353f0..a6469235d904 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -418,7 +418,8 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
case USB_CDC_NOTIFY_NETWORK_CONNECTION:
netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n",
event->wValue ? "on" : "off");
- usbnet_link_change(dev, !!event->wValue, 0);
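+	/* Only report a link change when the carrier state actually
+	 * differs, so repeated notifications from the device don't
+	 * flood the link-change machinery.
+	 */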
+ if (netif_carrier_ok(dev->net) != !!event->wValue)
+ usbnet_link_change(dev, !!event->wValue, 0);
break;
case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n",
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 687d70cfc556..46afb95ffabe 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -286,10 +286,11 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
return;
}
- if (urb->actual_length <= IPHETH_IP_ALIGN) {
- dev->net->stats.rx_length_errors++;
- return;
- }
+ /* iPhone may periodically send URBs with no payload
+ * on the "bulk in" endpoint. It is safe to ignore them.
+ */
+ if (urb->actual_length == 0)
+ goto rx_submit;
/* RX URBs starting with 0x00 0x01 do not encapsulate Ethernet frames,
* but rather are control frames. Their purpose is not documented, and
@@ -298,7 +299,8 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
* URB received from the bulk IN endpoint.
*/
if (unlikely
- (((char *)urb->transfer_buffer)[0] == 0 &&
+ (urb->actual_length == 4 &&
+ ((char *)urb->transfer_buffer)[0] == 0 &&
((char *)urb->transfer_buffer)[1] == 1))
goto rx_submit;
@@ -306,7 +308,6 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
if (retval != 0) {
dev_err(&dev->intf->dev, "%s: callback retval: %d\n",
__func__, retval);
- return;
}
rx_submit:
@@ -354,13 +355,14 @@ static int ipheth_carrier_set(struct ipheth_device *dev)
0x02, /* index */
dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE,
IPHETH_CTRL_TIMEOUT);
- if (retval < 0) {
+ if (retval <= 0) {
dev_err(&dev->intf->dev, "%s: usb_control_msg: %d\n",
__func__, retval);
return retval;
}
- if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) {
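+	/* Depending on how many bytes the device returns, the carrier
+	 * indication is carried in the first or the second byte of the
+	 * control buffer, so accept either layout.
+	 */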
+ if ((retval == 1 && dev->ctrl_buf[0] == IPHETH_CARRIER_ON) ||
+ (retval >= 2 && dev->ctrl_buf[1] == IPHETH_CARRIER_ON)) {
netif_carrier_on(dev->net);
if (dev->tx_urb->status != -EINPROGRESS)
netif_wake_queue(dev->net);
@@ -475,8 +477,8 @@ static int ipheth_close(struct net_device *net)
{
struct ipheth_device *dev = netdev_priv(net);
- cancel_delayed_work_sync(&dev->carrier_work);
netif_stop_queue(net);
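+	/* Stop the queue first: in-flight tx completions may requeue the
+	 * carrier work, and cancelling it last ensures it cannot wake a
+	 * queue that is meant to stay stopped.
+	 */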
+ cancel_delayed_work_sync(&dev->carrier_work);
return 0;
}
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index b0c0c9dd6a02..5d4a1fd2b524 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -17,7 +17,7 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/*
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 15e12f46d0ea..a5612c799f5e 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -5178,14 +5178,23 @@ static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac)
data = (u8 *)mac;
data += __le16_to_cpu(mac->fw_offset);
- generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length, data,
- type);
+ if (generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length,
+ data, type) < 0) {
+ dev_err(&tp->intf->dev, "Write %s fw fail\n",
+ type ? "PLA" : "USB");
+ return;
+ }
ocp_write_word(tp, type, __le16_to_cpu(mac->bp_ba_addr),
__le16_to_cpu(mac->bp_ba_value));
- generic_ocp_write(tp, __le16_to_cpu(mac->bp_start), BYTE_EN_DWORD,
- __le16_to_cpu(mac->bp_num) << 1, mac->bp, type);
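+	/* BYTE_EN_DWORD transfers are dword-granular, so round the
+	 * break-point block length up to a multiple of 4 bytes.
+	 */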
+ if (generic_ocp_write(tp, __le16_to_cpu(mac->bp_start), BYTE_EN_DWORD,
+ ALIGN(__le16_to_cpu(mac->bp_num) << 1, 4),
+ mac->bp, type) < 0) {
+ dev_err(&tp->intf->dev, "Write %s bp fail\n",
+ type ? "PLA" : "USB");
+ return;
+ }
bp_en_addr = __le16_to_cpu(mac->bp_en_addr);
if (bp_en_addr)
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 673d3aa83792..3d239b8d1a1b 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -30,7 +30,7 @@ static const char driver_name[] = "sierra_net";
#include <linux/usb/cdc.h>
#include <net/ip.h>
#include <net/udp.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/usb/usbnet.h>
#define SWI_USB_REQUEST_GET_FW_ATTR 0x06
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 9fd516e8bb10..ee1b5fd7b491 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -61,9 +61,6 @@
/*-------------------------------------------------------------------------*/
-// randomly generated ethernet address
-static u8 node_id [ETH_ALEN];
-
/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param (msg_level, int, 0);
@@ -467,10 +464,15 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
set_bit (work, &dev->flags);
- if (!schedule_work (&dev->kevent))
- netdev_dbg(dev->net, "kevent %s may have been dropped\n", usbnet_event_names[work]);
- else
- netdev_dbg(dev->net, "kevent %s scheduled\n", usbnet_event_names[work]);
+ if (!usbnet_going_away(dev)) {
+ if (!schedule_work(&dev->kevent))
+ netdev_dbg(dev->net,
+ "kevent %s may have been dropped\n",
+ usbnet_event_names[work]);
+ else
+ netdev_dbg(dev->net,
+ "kevent %s scheduled\n", usbnet_event_names[work]);
+ }
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
@@ -538,7 +540,8 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
tasklet_schedule (&dev->bh);
break;
case 0:
- __usbnet_queue_skb(&dev->rxq, skb, rx_start);
+ if (!usbnet_going_away(dev))
+ __usbnet_queue_skb(&dev->rxq, skb, rx_start);
}
} else {
netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
@@ -846,9 +849,18 @@ int usbnet_stop (struct net_device *net)
/* deferred work (timer, softirq, task) must also stop */
dev->flags = 0;
- del_timer_sync (&dev->delay);
- tasklet_kill (&dev->bh);
+ del_timer_sync(&dev->delay);
+ tasklet_kill(&dev->bh);
cancel_work_sync(&dev->kevent);
+
+ /* We have cyclic dependencies: the timer, the tasklet and the
+ * work item can each reschedule one another. Repeating these
+ * calls breaks the cycle, and the flags cleared above prevent
+ * anything from slipping into the gaps between them.
+ */
+ tasklet_kill(&dev->bh);
+ del_timer_sync(&dev->delay);
+ cancel_work_sync(&dev->kevent);
+
if (!pm)
usb_autopm_put_interface(dev->intf);
@@ -1174,7 +1186,8 @@ fail_halt:
status);
} else {
clear_bit (EVENT_RX_HALT, &dev->flags);
- tasklet_schedule (&dev->bh);
+ if (!usbnet_going_away(dev))
+ tasklet_schedule(&dev->bh);
}
}
@@ -1199,7 +1212,8 @@ fail_halt:
usb_autopm_put_interface(dev->intf);
fail_lowmem:
if (resched)
- tasklet_schedule (&dev->bh);
+ if (!usbnet_going_away(dev))
+ tasklet_schedule(&dev->bh);
}
}
@@ -1562,6 +1576,7 @@ static void usbnet_bh (struct timer_list *t)
} else if (netif_running (dev->net) &&
netif_device_present (dev->net) &&
netif_carrier_ok(dev->net) &&
+ !usbnet_going_away(dev) &&
!timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_PAUSED, &dev->flags) &&
!test_bit(EVENT_RX_HALT, &dev->flags)) {
@@ -1609,6 +1624,7 @@ void usbnet_disconnect (struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (!dev)
return;
+ usbnet_mark_going_away(dev);
xdev = interface_to_usbdev (intf);
@@ -1725,7 +1741,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->net = net;
strscpy(net->name, "usb%d", sizeof(net->name));
- eth_hw_addr_set(net, node_id);
/* rx and tx sides can use different message sizes;
* bind() should set rx_urb_size in that case.
@@ -1801,9 +1816,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
goto out4;
}
- /* let userspace know we have a random address */
- if (ether_addr_equal(net->dev_addr, node_id))
- net->addr_assign_type = NET_ADDR_RANDOM;
+ /* eth_hw_addr_random() also sets NET_ADDR_RANDOM to flag the address for user space */
+ if (!is_valid_ether_addr(net->dev_addr))
+ eth_hw_addr_random(net);
if ((dev->driver_info->flags & FLAG_WLAN) != 0)
SET_NETDEV_DEVTYPE(net, &wlan_type);
@@ -1855,6 +1870,7 @@ out1:
* may trigger an error resubmitting itself and, worse,
* schedule a timer. So we kill it all just in case.
*/
+ usbnet_mark_going_away(dev);
cancel_work_sync(&dev->kevent);
del_timer_sync(&dev->delay);
free_netdev(net);
@@ -2211,7 +2227,6 @@ static int __init usbnet_init(void)
BUILD_BUG_ON(
sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data));
- eth_random_addr(node_id);
return 0;
}
module_init(usbnet_init);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 426e68a95067..18148e068aa0 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1696,11 +1696,12 @@ static void veth_setup(struct net_device *dev)
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->priv_flags |= IFF_NO_QUEUE;
dev->priv_flags |= IFF_PHONY_HEADROOM;
+ dev->priv_flags |= IFF_DISABLE_NETPOLL;
+ dev->lltx = true;
dev->netdev_ops = &veth_netdev_ops;
dev->xdp_metadata_ops = &veth_xdp_metadata_ops;
dev->ethtool_ops = &veth_ethtool_ops;
- dev->features |= NETIF_F_LLTX;
dev->features |= VETH_FEATURES;
dev->vlan_features = dev->features &
~(NETIF_F_HW_VLAN_CTAG_TX |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3f10c72743e9..f8131f92a392 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -356,6 +356,9 @@ struct receive_queue {
struct xdp_rxq_info xsk_rxq_info;
struct xdp_buff **xsk_buffs;
+
+ /* Driver does the DMA mapping itself */
+ bool do_dma;
};
/* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -885,7 +888,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
void *buf;
buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
- if (buf)
+ if (buf && rq->do_dma)
virtnet_rq_unmap(rq, buf, *len);
return buf;
@@ -898,6 +901,11 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
u32 offset;
void *head;
+ if (!rq->do_dma) {
+ sg_init_one(rq->sg, buf, len);
+ return;
+ }
+
head = page_address(rq->alloc_frag.page);
offset = buf - head;
@@ -923,42 +931,44 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
head = page_address(alloc_frag->page);
- dma = head;
+ if (rq->do_dma) {
+ dma = head;
+
+ /* new pages */
+ if (!alloc_frag->offset) {
+ if (rq->last_dma) {
+ /* Now that the new page is allocated, the previous dma
+ * mapping will no longer be used, so it can be unmapped
+ * once its ref count drops to 0.
+ */
+ virtnet_rq_unmap(rq, rq->last_dma, 0);
+ rq->last_dma = NULL;
+ }
- /* new pages */
- if (!alloc_frag->offset) {
- if (rq->last_dma) {
- /* Now, the new page is allocated, the last dma
- * will not be used. So the dma can be unmapped
- * if the ref is 0.
- */
- virtnet_rq_unmap(rq, rq->last_dma, 0);
- rq->last_dma = NULL;
- }
+ dma->len = alloc_frag->size - sizeof(*dma);
- dma->len = alloc_frag->size - sizeof(*dma);
+ addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+ dma->len, DMA_FROM_DEVICE, 0);
+ if (virtqueue_dma_mapping_error(rq->vq, addr))
+ return NULL;
- addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
- dma->len, DMA_FROM_DEVICE, 0);
- if (virtqueue_dma_mapping_error(rq->vq, addr))
- return NULL;
+ dma->addr = addr;
+ dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
- dma->addr = addr;
- dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+ /* Add a reference to dma to prevent the entire dma from
+ * being released during error handling. This reference
+ * will be freed after the pages are no longer used.
+ */
+ get_page(alloc_frag->page);
+ dma->ref = 1;
+ alloc_frag->offset = sizeof(*dma);
- /* Add a reference to dma to prevent the entire dma from
- * being released during error handling. This reference
- * will be freed after the pages are no longer used.
- */
- get_page(alloc_frag->page);
- dma->ref = 1;
- alloc_frag->offset = sizeof(*dma);
+ rq->last_dma = dma;
+ }
- rq->last_dma = dma;
+ ++dma->ref;
}
- ++dma->ref;
-
buf = head + alloc_frag->offset;
get_page(alloc_frag->page);
@@ -967,19 +977,6 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
return buf;
}
-static void virtnet_rq_set_premapped(struct virtnet_info *vi)
-{
- int i;
-
- /* disable for big mode */
- if (!vi->mergeable_rx_bufs && vi->big_packets)
- return;
-
- for (i = 0; i < vi->max_queue_pairs; i++)
- /* error should never happen */
- BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
-}
-
static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
{
struct virtnet_info *vi = vq->vdev->priv;
@@ -993,7 +990,7 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
return;
}
- if (!vi->big_packets || vi->mergeable_rx_bufs)
+ if (rq->do_dma)
virtnet_rq_unmap(rq, buf, 0);
virtnet_rq_free_buf(vi, rq, buf);
@@ -1810,6 +1807,11 @@ static struct sk_buff *receive_small(struct net_device *dev,
struct page *page = virt_to_head_page(buf);
struct sk_buff *skb;
+ /* We passed the address of the virtnet header to the virtio
+ * core, so move the buffer pointer back past the padding here.
+ */
+ buf -= VIRTNET_RX_PAD + xdp_headroom;
+
len -= vi->hdr_len;
u64_stats_add(&stats->bytes, len);
@@ -2425,12 +2427,14 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
if (unlikely(!buf))
return -ENOMEM;
- virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
- vi->hdr_len + GOOD_PACKET_LEN);
+ buf += VIRTNET_RX_PAD + xdp_headroom;
+
+ virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- virtnet_rq_unmap(rq, buf, 0);
+ if (rq->do_dma)
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -2544,7 +2548,8 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
ctx = mergeable_len_to_ctx(len + room, headroom);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- virtnet_rq_unmap(rq, buf, 0);
+ if (rq->do_dma)
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -2701,7 +2706,7 @@ static int virtnet_receive_packets(struct virtnet_info *vi,
}
} else {
while (packets < budget &&
- (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
+ (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats);
packets++;
}
@@ -2867,8 +2872,8 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
if (err < 0)
goto err_xdp_reg_mem_model;
- virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, qp_index));
+ virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
return 0;
@@ -2885,6 +2890,25 @@ static void virtnet_cancel_dim(struct virtnet_info *vi, struct dim *dim)
net_dim_work_cancel(dim);
}
+static void virtnet_update_settings(struct virtnet_info *vi)
+{
+ u32 speed;
+ u8 duplex;
+
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
+ return;
+
+ virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
+
+ if (ethtool_validate_speed(speed))
+ vi->speed = speed;
+
+ virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
+
+ if (ethtool_validate_duplex(duplex))
+ vi->duplex = duplex;
+}
+
static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -2903,6 +2927,15 @@ static int virtnet_open(struct net_device *dev)
goto err_enable_qp;
}
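+	/* Restore carrier state and re-enable config change notifications
+	 * (disabled in probe/close); without VIRTIO_NET_F_STATUS we have
+	 * to assume the link is up.
+	 */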
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
+ if (vi->status & VIRTIO_NET_S_LINK_UP)
+ netif_carrier_on(vi->dev);
+ virtio_config_driver_enable(vi->vdev);
+ } else {
+ vi->status = VIRTIO_NET_S_LINK_UP;
+ netif_carrier_on(dev);
+ }
+
return 0;
err_enable_qp:
@@ -3381,12 +3414,22 @@ static int virtnet_close(struct net_device *dev)
disable_delayed_refill(vi);
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
+ /* Prevent the config change callback from changing carrier
+ * after close
+ */
+ virtio_config_driver_disable(vi->vdev);
+ /* Stop getting status/speed updates: we don't care until next
+ * open
+ */
+ cancel_work_sync(&vi->config_work);
for (i = 0; i < vi->max_queue_pairs; i++) {
virtnet_disable_queue_pair(vi, i);
virtnet_cancel_dim(vi, &vi->rq[i].dim);
}
+ netif_carrier_off(dev);
+
return 0;
}
@@ -5095,25 +5138,6 @@ static void virtnet_init_settings(struct net_device *dev)
vi->duplex = DUPLEX_UNKNOWN;
}
-static void virtnet_update_settings(struct virtnet_info *vi)
-{
- u32 speed;
- u8 duplex;
-
- if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
- return;
-
- virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
-
- if (ethtool_validate_speed(speed))
- vi->speed = speed;
-
- virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
-
- if (ethtool_validate_duplex(duplex))
- vi->duplex = duplex;
-}
-
static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
{
return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
@@ -5892,7 +5916,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
int i;
for (i = 0; i < vi->max_queue_pairs; i++)
if (vi->rq[i].alloc_frag.page) {
- if (vi->rq[i].last_dma)
+ if (vi->rq[i].do_dma && vi->rq[i].last_dma)
virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
put_page(vi->rq[i].alloc_frag.page);
}
@@ -6090,8 +6114,6 @@ static int init_vqs(struct virtnet_info *vi)
if (ret)
goto err_free;
- virtnet_rq_set_premapped(vi);
-
cpus_read_lock();
virtnet_set_affinity(vi);
cpus_read_unlock();
@@ -6524,6 +6546,9 @@ static int virtnet_probe(struct virtio_device *vdev)
goto free_failover;
}
+ /* Disable config change notification until ndo_open. */
+ virtio_config_driver_disable(vi->vdev);
+
virtio_device_ready(vdev);
virtnet_set_queues(vi, vi->curr_queue_pairs);
@@ -6573,19 +6598,11 @@ static int virtnet_probe(struct virtio_device *vdev)
vi->device_stats_cap = le64_to_cpu(v);
}
- rtnl_unlock();
-
- err = virtnet_cpu_notif_add(vi);
- if (err) {
- pr_debug("virtio_net: registering cpu notifier failed\n");
- goto free_unregister_netdev;
- }
-
/* Assume link up if device can't report link status,
otherwise get link status from config. */
netif_carrier_off(dev);
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
- schedule_work(&vi->config_work);
+ virtnet_config_changed_work(&vi->config_work);
} else {
vi->status = VIRTIO_NET_S_LINK_UP;
virtnet_update_settings(vi);
@@ -6597,6 +6614,14 @@ static int virtnet_probe(struct virtio_device *vdev)
set_bit(guest_offloads[i], &vi->guest_offloads);
vi->guest_offloads_capable = vi->guest_offloads;
+ rtnl_unlock();
+
+ err = virtnet_cpu_notif_add(vi);
+ if (err) {
+ pr_debug("virtio_net: registering cpu notifier failed\n");
+ goto free_unregister_netdev;
+ }
+
pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
dev->name, max_queue_pairs);
diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
index a6c787454a1a..1341374a4588 100644
--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
+++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
@@ -148,7 +148,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
} else { /* XDP buffer from page pool */
page = virt_to_page(xdpf->data);
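+		/* The actual headroom can differ from VMXNET3_XDP_HEADROOM,
+		 * e.g. after an XDP program adjusted the head, so derive the
+		 * data offset from the frame itself.
+		 */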
tbi->dma_addr = page_pool_get_dma_addr(page) +
- VMXNET3_XDP_HEADROOM;
+ (xdpf->data - (void *)xdpf);
dma_sync_single_for_device(&adapter->pdev->dev,
tbi->dma_addr, buf_size,
DMA_TO_DEVICE);
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 040f0bb36c0e..4087f72f0d2b 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -37,6 +37,7 @@
#include <net/sch_generic.h>
#include <net/netns/generic.h>
#include <net/netfilter/nf_conntrack.h>
+#include <net/inet_dscp.h>
#define DRV_NAME "vrf"
#define DRV_VERSION "1.1"
@@ -520,7 +521,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
/* needed to match OIF rule */
fl4.flowi4_l3mdev = vrf_dev->ifindex;
fl4.flowi4_iif = LOOPBACK_IFINDEX;
- fl4.flowi4_tos = RT_TOS(ip4h->tos);
+ fl4.flowi4_tos = ip4h->tos & INET_DSCP_MASK;
fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
fl4.flowi4_proto = ip4h->protocol;
fl4.daddr = ip4h->daddr;
@@ -607,7 +608,9 @@ static void vrf_finish_direct(struct sk_buff *skb)
eth_zero_addr(eth->h_dest);
eth->h_proto = skb->protocol;
+ rcu_read_lock_bh();
dev_queue_xmit_nit(skb, vrf_dev);
+ rcu_read_unlock_bh();
skb_pull(skb, ETH_HLEN);
}
@@ -1634,10 +1637,10 @@ static void vrf_setup(struct net_device *dev)
eth_hw_addr_random(dev);
/* don't acquire vrf device's netif_tx_lock when transmitting */
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
/* don't allow vrf devices to change network namespaces. */
- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->netns_local = true;
/* does not make sense for a VLAN to be added to a vrf device */
dev->features |= NETIF_F_VLAN_CHALLENGED;
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
index 4c260074c091..53fb76d574c6 100644
--- a/drivers/net/vsockmon.c
+++ b/drivers/net/vsockmon.c
@@ -83,13 +83,13 @@ static void vsockmon_setup(struct net_device *dev)
{
dev->type = ARPHRD_VSOCKMON;
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->lltx = true;
dev->netdev_ops = &vsockmon_ops;
dev->ethtool_ops = &vsockmon_ethtool_ops;
dev->needs_free_netdev = true;
- dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
- NETIF_F_HIGHDMA | NETIF_F_LLTX;
+ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
dev->flags = IFF_NOARP;
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index ba59e92ab941..6e9a3795846a 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -277,8 +277,7 @@ static void __vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+ rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan,
@@ -2690,11 +2689,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *rdst, *fdst = NULL;
const struct ip_tunnel_info *info;
- bool did_rsc = false;
struct vxlan_fdb *f;
struct ethhdr *eth;
__be32 vni = 0;
u32 nhid = 0;
+ bool did_rsc;
info = skb_tunnel_info(skb);
@@ -3322,7 +3321,6 @@ static void vxlan_setup(struct net_device *dev)
dev->needs_free_netdev = true;
SET_NETDEV_DEVTYPE(dev, &vxlan_type);
- dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
@@ -3332,7 +3330,9 @@ static void vxlan_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
netif_keep_dst(dev);
- dev->priv_flags |= IFF_NO_QUEUE | IFF_CHANGE_PROTO_DOWN;
+ dev->priv_flags |= IFF_NO_QUEUE;
+ dev->change_proto_down = true;
+ dev->lltx = true;
/* MTU range: 68 - 65535 */
dev->min_mtu = ETH_MIN_MTU;
@@ -4913,9 +4913,13 @@ static int __init vxlan_init_module(void)
if (rc)
goto out4;
- vxlan_vnifilter_init();
+ rc = vxlan_vnifilter_init();
+ if (rc)
+ goto out5;
return 0;
+out5:
+ rtnl_link_unregister(&vxlan_link_ops);
out4:
unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
out3:
diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h
index b35d96b78843..76a351a997d5 100644
--- a/drivers/net/vxlan/vxlan_private.h
+++ b/drivers/net/vxlan/vxlan_private.h
@@ -202,7 +202,7 @@ int vxlan_vni_in_use(struct net *src_net, struct vxlan_dev *vxlan,
int vxlan_vnigroup_init(struct vxlan_dev *vxlan);
void vxlan_vnigroup_uninit(struct vxlan_dev *vxlan);
-void vxlan_vnifilter_init(void);
+int vxlan_vnifilter_init(void);
void vxlan_vnifilter_uninit(void);
void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni,
struct vxlan_vni_node *vninode,
diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
index 9c59d0bf8c3d..d2023e7131bd 100644
--- a/drivers/net/vxlan/vxlan_vnifilter.c
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -992,19 +992,18 @@ static int vxlan_vnifilter_process(struct sk_buff *skb, struct nlmsghdr *nlh,
return err;
}
-void vxlan_vnifilter_init(void)
+static const struct rtnl_msg_handler vxlan_vnifilter_rtnl_msg_handlers[] = {
+ {THIS_MODULE, PF_BRIDGE, RTM_GETTUNNEL, NULL, vxlan_vnifilter_dump, 0},
+ {THIS_MODULE, PF_BRIDGE, RTM_NEWTUNNEL, vxlan_vnifilter_process, NULL, 0},
+ {THIS_MODULE, PF_BRIDGE, RTM_DELTUNNEL, vxlan_vnifilter_process, NULL, 0},
+};
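+/* rtnl_register_many() registers the whole table and unwinds any already
+ * registered handlers on failure, keeping init and uninit symmetric.
+ */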
+
+int vxlan_vnifilter_init(void)
{
- rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETTUNNEL, NULL,
- vxlan_vnifilter_dump, 0);
- rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWTUNNEL,
- vxlan_vnifilter_process, NULL, 0);
- rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELTUNNEL,
- vxlan_vnifilter_process, NULL, 0);
+ return rtnl_register_many(vxlan_vnifilter_rtnl_msg_handlers);
}
void vxlan_vnifilter_uninit(void)
{
- rtnl_unregister(PF_BRIDGE, RTM_GETTUNNEL);
- rtnl_unregister(PF_BRIDGE, RTM_NEWTUNNEL);
- rtnl_unregister(PF_BRIDGE, RTM_DELTUNNEL);
+ rtnl_unregister_many(vxlan_vnifilter_rtnl_msg_handlers);
}
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 3feb36ee5bfb..45e9b908dbfb 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -289,7 +289,7 @@ static void wg_setup(struct net_device *dev)
dev->type = ARPHRD_NONE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->priv_flags |= IFF_NO_QUEUE;
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
dev->features |= WG_NETDEV_FEATURES;
dev->hw_features |= WG_NETDEV_FEATURES;
dev->hw_enc_features |= WG_NETDEV_FEATURES;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index b93a64bf8190..35bfe7232e95 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -1774,7 +1774,7 @@ static ssize_t ath10k_write_simulate_radar(struct file *file,
if (!arvif->is_started)
return -EINVAL;
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ar->hw, NULL);
return count;
}
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a5da32e87106..646e1737d4c4 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1437,7 +1437,7 @@ static void ath10k_recalc_radar_detection(struct ath10k *ar)
* by indicating that radar was detected.
*/
ath10k_warn(ar, "failed to start CAC: %d\n", ret);
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ar->hw, NULL);
}
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index fe2344598364..4861179b2217 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3990,7 +3990,7 @@ static void ath10k_radar_detected(struct ath10k *ar)
if (ar->dfs_block_radar_events)
ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ar->hw, NULL);
}
static void ath10k_radar_confirmation_work(struct work_struct *work)
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 634d385fd9ad..97b12f51ef28 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -413,7 +413,7 @@ static int ath11k_ahb_power_up(struct ath11k_base *ab)
return ret;
}
-static void ath11k_ahb_power_down(struct ath11k_base *ab, bool is_suspend)
+static void ath11k_ahb_power_down(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
@@ -1280,7 +1280,7 @@ static void ath11k_ahb_remove(struct platform_device *pdev)
struct ath11k_base *ab = platform_get_drvdata(pdev);
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath11k_ahb_power_down(ab, false);
+ ath11k_ahb_power_down(ab);
ath11k_debugfs_soc_destroy(ab);
ath11k_qmi_deinit_service(ab);
goto qmi_fail;
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 03187df26000..ccf4ad35fdc3 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -906,6 +906,12 @@ int ath11k_core_suspend(struct ath11k_base *ab)
return ret;
}
+ ret = ath11k_wow_enable(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to enable wow during suspend: %d\n", ret);
+ return ret;
+ }
+
ret = ath11k_dp_rx_pktlog_stop(ab, false);
if (ret) {
ath11k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n",
@@ -916,85 +922,29 @@ int ath11k_core_suspend(struct ath11k_base *ab)
ath11k_ce_stop_shadow_timers(ab);
ath11k_dp_stop_shadow_timers(ab);
- /* PM framework skips suspend_late/resume_early callbacks
- * if other devices report errors in their suspend callbacks.
- * However ath11k_core_resume() would still be called because
- * here we return success thus kernel put us on dpm_suspended_list.
- * Since we won't go through a power down/up cycle, there is
- * no chance to call complete(&ab->restart_completed) in
- * ath11k_core_restart(), making ath11k_core_resume() timeout.
- * So call it here to avoid this issue. This also works in case
- * no error happens thus suspend_late/resume_early get called,
- * because it will be reinitialized in ath11k_core_resume_early().
- */
- complete(&ab->restart_completed);
-
- return 0;
-}
-EXPORT_SYMBOL(ath11k_core_suspend);
-
-int ath11k_core_suspend_late(struct ath11k_base *ab)
-{
- struct ath11k_pdev *pdev;
- struct ath11k *ar;
-
- if (!ab->hw_params.supports_suspend)
- return -EOPNOTSUPP;
-
- /* so far single_pdev_only chips have supports_suspend as true
- * and only the first pdev is valid.
- */
- pdev = ath11k_core_get_single_pdev(ab);
- ar = pdev->ar;
- if (!ar || ar->state != ATH11K_STATE_OFF)
- return 0;
-
ath11k_hif_irq_disable(ab);
ath11k_hif_ce_irq_disable(ab);
- ath11k_hif_power_down(ab, true);
+ ret = ath11k_hif_suspend(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to suspend hif: %d\n", ret);
+ return ret;
+ }
return 0;
}
-EXPORT_SYMBOL(ath11k_core_suspend_late);
-
-int ath11k_core_resume_early(struct ath11k_base *ab)
-{
- int ret;
- struct ath11k_pdev *pdev;
- struct ath11k *ar;
-
- if (!ab->hw_params.supports_suspend)
- return -EOPNOTSUPP;
-
- /* so far single_pdev_only chips have supports_suspend as true
- * and only the first pdev is valid.
- */
- pdev = ath11k_core_get_single_pdev(ab);
- ar = pdev->ar;
- if (!ar || ar->state != ATH11K_STATE_OFF)
- return 0;
-
- reinit_completion(&ab->restart_completed);
- ret = ath11k_hif_power_up(ab);
- if (ret)
- ath11k_warn(ab, "failed to power up hif during resume: %d\n", ret);
-
- return ret;
-}
-EXPORT_SYMBOL(ath11k_core_resume_early);
+EXPORT_SYMBOL(ath11k_core_suspend);
int ath11k_core_resume(struct ath11k_base *ab)
{
int ret;
struct ath11k_pdev *pdev;
struct ath11k *ar;
- long time_left;
if (!ab->hw_params.supports_suspend)
return -EOPNOTSUPP;
- /* so far single_pdev_only chips have supports_suspend as true
+ /* so far single_pdev_only chips have supports_suspend as true
* and only the first pdev is valid.
*/
pdev = ath11k_core_get_single_pdev(ab);
@@ -1002,29 +952,29 @@ int ath11k_core_resume(struct ath11k_base *ab)
if (!ar || ar->state != ATH11K_STATE_OFF)
return 0;
- time_left = wait_for_completion_timeout(&ab->restart_completed,
- ATH11K_RESET_TIMEOUT_HZ);
- if (time_left == 0) {
- ath11k_warn(ab, "timeout while waiting for restart complete");
- return -ETIMEDOUT;
+ ret = ath11k_hif_resume(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to resume hif during resume: %d\n", ret);
+ return ret;
}
- if (ab->hw_params.current_cc_support &&
- ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
- ret = ath11k_reg_set_cc(ar);
- if (ret) {
- ath11k_warn(ab, "failed to set country code during resume: %d\n",
- ret);
- return ret;
- }
- }
+ ath11k_hif_ce_irq_enable(ab);
+ ath11k_hif_irq_enable(ab);
ret = ath11k_dp_rx_pktlog_start(ab);
- if (ret)
+ if (ret) {
ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n",
ret);
+ return ret;
+ }
- return ret;
+ ret = ath11k_wow_wakeup(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to wakeup wow during resume: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(ath11k_core_resume);
@@ -2119,8 +2069,6 @@ static void ath11k_core_restart(struct work_struct *work)
if (!ab->is_reset)
ath11k_core_post_reconfigure_recovery(ab);
-
- complete(&ab->restart_completed);
}
static void ath11k_core_reset(struct work_struct *work)
@@ -2190,7 +2138,7 @@ static void ath11k_core_reset(struct work_struct *work)
ath11k_hif_irq_disable(ab);
ath11k_hif_ce_irq_disable(ab);
- ath11k_hif_power_down(ab, false);
+ ath11k_hif_power_down(ab);
ath11k_hif_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n");
@@ -2263,7 +2211,7 @@ void ath11k_core_deinit(struct ath11k_base *ab)
mutex_unlock(&ab->core_lock);
- ath11k_hif_power_down(ab, false);
+ ath11k_hif_power_down(ab);
ath11k_mac_destroy(ab);
ath11k_core_soc_destroy(ab);
ath11k_fw_destroy(ab);
@@ -2316,7 +2264,6 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
init_completion(&ab->wow.wakeup_completed);
- init_completion(&ab->restart_completed);
ab->dev = dev;
ab->hif.bus = bus;
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index df24f0e409af..09c37e19a168 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -399,6 +399,7 @@ struct ath11k_vif {
u8 bssid[ETH_ALEN];
struct cfg80211_bitrate_mask bitrate_mask;
struct delayed_work connection_loss_work;
+ struct work_struct bcn_tx_work;
int num_legacy_stations;
int rtscts_prot_mode;
int txpower;
@@ -406,11 +407,17 @@ struct ath11k_vif {
bool wpaie_present;
bool bcca_zero_sent;
bool do_not_send_tmpl;
- struct ieee80211_chanctx_conf chanctx;
struct ath11k_arp_ns_offload arp_ns_offload;
struct ath11k_rekey_data rekey_data;
struct ath11k_reg_tpc_power_info reg_tpc_info;
+
+ /* Must be last - ends in a flexible-array member.
+ *
+ * FIXME: Driver should not copy struct ieee80211_chanctx_conf,
+ * especially because it has a flexible array. Find a better way.
+ */
+ struct ieee80211_chanctx_conf chanctx;
};
struct ath11k_vif_iter {
@@ -1036,8 +1043,6 @@ struct ath11k_base {
DECLARE_BITMAP(fw_features, ATH11K_FW_FEATURE_COUNT);
} fw;
- struct completion restart_completed;
-
#ifdef CONFIG_NL80211_TESTMODE
struct {
u32 data_pos;
@@ -1237,10 +1242,8 @@ void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
int ath11k_core_check_dt(struct ath11k_base *ath11k);
int ath11k_core_check_smbios(struct ath11k_base *ab);
void ath11k_core_halt(struct ath11k *ar);
-int ath11k_core_resume_early(struct ath11k_base *ab);
int ath11k_core_resume(struct ath11k_base *ab);
int ath11k_core_suspend(struct ath11k_base *ab);
-int ath11k_core_suspend_late(struct ath11k_base *ab);
void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab);
bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab);
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 2f6dd69d3be2..65d2bc0687c8 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -1305,18 +1305,6 @@ struct htt_ppdu_stats_user_rate {
#define HTT_TX_INFO_PEERID(_flags) \
FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M, _flags)
-struct htt_tx_ppdu_stats_info {
- struct htt_tlv tlv_hdr;
- u32 tx_success_bytes;
- u32 tx_retry_bytes;
- u32 tx_failed_bytes;
- u32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
- u16 tx_success_msdus;
- u16 tx_retry_msdus;
- u16 tx_failed_msdus;
- u16 tx_duration; /* united in us */
-} __packed;
-
enum htt_ppdu_stats_usr_compln_status {
HTT_PPDU_STATS_USER_STATUS_OK,
HTT_PPDU_STATS_USER_STATUS_FILTERED,
@@ -1364,17 +1352,6 @@ struct htt_ppdu_stats_usr_cmpltn_ack_ba_status {
u32 success_bytes;
} __packed;
-struct htt_ppdu_stats_usr_cmn_array {
- struct htt_tlv tlv_hdr;
- u32 num_ppdu_stats;
- /* tx_ppdu_stats_info is filled by multiple struct htt_tx_ppdu_stats_info
- * elements.
- * tx_ppdu_stats_info is variable length, with length =
- * number_of_ppdu_stats * sizeof (struct htt_tx_ppdu_stats_info)
- */
- struct htt_tx_ppdu_stats_info tx_ppdu_info[];
-} __packed;
-
struct htt_ppdu_user_stats {
u16 peer_id;
u32 tlv_flags;
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 86485580dd89..c087d8a0f5b2 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -2697,7 +2697,7 @@ try_again:
if (unlikely(push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
dev_kfree_skb_any(msdu);
- ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
+ ab->soc_stats.hal_reo_error[ring_id]++;
continue;
}
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
index c4c6cc09c7c1..674ff772b181 100644
--- a/drivers/net/wireless/ath/ath11k/hif.h
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -18,7 +18,7 @@ struct ath11k_hif_ops {
int (*start)(struct ath11k_base *ab);
void (*stop)(struct ath11k_base *ab);
int (*power_up)(struct ath11k_base *ab);
- void (*power_down)(struct ath11k_base *ab, bool is_suspend);
+ void (*power_down)(struct ath11k_base *ab);
int (*suspend)(struct ath11k_base *ab);
int (*resume)(struct ath11k_base *ab);
int (*map_service_to_pipe)(struct ath11k_base *ab, u16 service_id,
@@ -67,18 +67,12 @@ static inline void ath11k_hif_irq_disable(struct ath11k_base *ab)
static inline int ath11k_hif_power_up(struct ath11k_base *ab)
{
- if (!ab->hif.ops->power_up)
- return -EOPNOTSUPP;
-
return ab->hif.ops->power_up(ab);
}
-static inline void ath11k_hif_power_down(struct ath11k_base *ab, bool is_suspend)
+static inline void ath11k_hif_power_down(struct ath11k_base *ab)
{
- if (!ab->hif.ops->power_down)
- return;
-
- ab->hif.ops->power_down(ab, is_suspend);
+ ab->hif.ops->power_down(ab);
}
static inline int ath11k_hif_suspend(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index ba910ae2c676..f8068d2e848c 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -6599,6 +6599,16 @@ static int ath11k_mac_vdev_delete(struct ath11k *ar, struct ath11k_vif *arvif)
return ret;
}
+static void ath11k_mac_bcn_tx_work(struct work_struct *work)
+{
+ struct ath11k_vif *arvif = container_of(work, struct ath11k_vif,
+ bcn_tx_work);
+
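+	/* ath11k_mac_bcn_tx_event() takes conf_mutex and may sleep, so it
+	 * cannot be called from the atomic WMI event path; run it from a
+	 * work item instead.
+	 */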
+ mutex_lock(&arvif->ar->conf_mutex);
+ ath11k_mac_bcn_tx_event(arvif);
+ mutex_unlock(&arvif->ar->conf_mutex);
+}
+
static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -6637,6 +6647,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
arvif->vif = vif;
INIT_LIST_HEAD(&arvif->list);
+ INIT_WORK(&arvif->bcn_tx_work, ath11k_mac_bcn_tx_work);
INIT_DELAYED_WORK(&arvif->connection_loss_work,
ath11k_mac_vif_sta_connection_loss_work);
@@ -6879,6 +6890,7 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
int i;
cancel_delayed_work_sync(&arvif->connection_loss_work);
+ cancel_work_sync(&arvif->bcn_tx_work);
mutex_lock(&ar->conf_mutex);
@@ -7900,6 +7912,7 @@ static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar,
}
if (psd) {
+ arvif->reg_tpc_info.is_psd_power = true;
arvif->reg_tpc_info.num_pwr_levels = psd->count;
for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index ab182690aed3..6974a551883f 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -453,17 +453,9 @@ int ath11k_mhi_start(struct ath11k_pci *ab_pci)
return 0;
}
-void ath11k_mhi_stop(struct ath11k_pci *ab_pci, bool is_suspend)
+void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
{
- /* During suspend we need to use mhi_power_down_keep_dev()
- * workaround, otherwise ath11k_core_resume() will timeout
- * during resume.
- */
- if (is_suspend)
- mhi_power_down_keep_dev(ab_pci->mhi_ctrl, true);
- else
- mhi_power_down(ab_pci->mhi_ctrl, true);
-
+ mhi_power_down(ab_pci->mhi_ctrl, true);
mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
}
diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h
index 2d567705e732..a682aad52fc5 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.h
+++ b/drivers/net/wireless/ath/ath11k/mhi.h
@@ -18,7 +18,7 @@
#define MHICTRL_RESET_MASK 0x2
int ath11k_mhi_start(struct ath11k_pci *ar_pci);
-void ath11k_mhi_stop(struct ath11k_pci *ar_pci, bool is_suspend);
+void ath11k_mhi_stop(struct ath11k_pci *ar_pci);
int ath11k_mhi_register(struct ath11k_pci *ar_pci);
void ath11k_mhi_unregister(struct ath11k_pci *ar_pci);
void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab);
@@ -26,4 +26,5 @@ void ath11k_mhi_clear_vector(struct ath11k_base *ab);
int ath11k_mhi_suspend(struct ath11k_pci *ar_pci);
int ath11k_mhi_resume(struct ath11k_pci *ar_pci);
+
#endif
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 8d63b84d1261..be9d2c69cc41 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -638,7 +638,7 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
return 0;
}
-static void ath11k_pci_power_down(struct ath11k_base *ab, bool is_suspend)
+static void ath11k_pci_power_down(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -649,7 +649,7 @@ static void ath11k_pci_power_down(struct ath11k_base *ab, bool is_suspend)
ath11k_pci_msi_disable(ab_pci);
- ath11k_mhi_stop(ab_pci, is_suspend);
+ ath11k_mhi_stop(ab_pci);
clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
ath11k_pci_sw_reset(ab_pci->ab, false);
}
@@ -970,7 +970,7 @@ static void ath11k_pci_remove(struct pci_dev *pdev)
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath11k_pci_power_down(ab, false);
+ ath11k_pci_power_down(ab);
ath11k_debugfs_soc_destroy(ab);
ath11k_qmi_deinit_service(ab);
goto qmi_fail;
@@ -998,7 +998,7 @@ static void ath11k_pci_shutdown(struct pci_dev *pdev)
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
- ath11k_pci_power_down(ab, false);
+ ath11k_pci_power_down(ab);
}
static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
@@ -1035,39 +1035,9 @@ static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
return ret;
}
-static __maybe_unused int ath11k_pci_pm_suspend_late(struct device *dev)
-{
- struct ath11k_base *ab = dev_get_drvdata(dev);
- int ret;
-
- ret = ath11k_core_suspend_late(ab);
- if (ret)
- ath11k_warn(ab, "failed to late suspend core: %d\n", ret);
-
- /* Similar to ath11k_pci_pm_suspend(), we return success here
- * even if an error happens, to allow system suspend/hibernation
- * to survive.
- */
- return 0;
-}
-
-static __maybe_unused int ath11k_pci_pm_resume_early(struct device *dev)
-{
- struct ath11k_base *ab = dev_get_drvdata(dev);
- int ret;
-
- ret = ath11k_core_resume_early(ab);
- if (ret)
- ath11k_warn(ab, "failed to early resume core: %d\n", ret);
-
- return ret;
-}
-
-static const struct dev_pm_ops __maybe_unused ath11k_pci_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend,
- ath11k_pci_pm_resume)
- SET_LATE_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend_late,
- ath11k_pci_pm_resume_early)
-};
+static SIMPLE_DEV_PM_OPS(ath11k_pci_pm_ops,
+ ath11k_pci_pm_suspend,
+ ath11k_pci_pm_resume);
static struct pci_driver ath11k_pci_driver = {
.name = "ath11k_pci",
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 1bc648920ab6..f477afd325de 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -2877,7 +2877,7 @@ int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab)
}
/* reset the firmware */
- ath11k_hif_power_down(ab, false);
+ ath11k_hif_power_down(ab);
ath11k_hif_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_QMI, "exit wait for cold boot done\n");
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 38f175dd1557..87abfa547529 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -7404,7 +7404,9 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s
rcu_read_unlock();
return;
}
- ath11k_mac_bcn_tx_event(arvif);
+
+ queue_work(ab->workqueue, &arvif->bcn_tx_work);
+
rcu_read_unlock();
}
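The event path here runs under rcu_read_lock(), so the beacon-tx handling is
deferred to a workqueue where process context (sleeping, mutexes) is
available. A kernel-style sketch of the pattern; the handler name below is
illustrative, while the field and queue/cancel calls match this patch:

        #include <linux/workqueue.h>

        static void ath11k_bcn_tx_work_fn(struct work_struct *work)
        {
                struct ath11k_vif *arvif =
                        container_of(work, struct ath11k_vif, bcn_tx_work);
                /* process context: may sleep, take ar->conf_mutex, etc. */
        }

        /* setup:    INIT_WORK(&arvif->bcn_tx_work, ath11k_bcn_tx_work_fn);
         * event:    queue_work(ab->workqueue, &arvif->bcn_tx_work);
         * teardown: cancel_work_sync(&arvif->bcn_tx_work);
         *           (see the remove_interface hunk above) */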
@@ -8356,7 +8358,7 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
if (ar->dfs_block_radar_events)
ath11k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ar->hw, NULL);
exit:
rcu_read_unlock();
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index cdfd43a7321a..7f2e9a9b4097 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -287,7 +287,6 @@ struct ath12k_vif {
int txpower;
bool rsnie_present;
bool wpaie_present;
- struct ieee80211_chanctx_conf chanctx;
u32 key_cipher;
u8 tx_encap_type;
u8 vdev_stats_id;
@@ -295,6 +294,13 @@ struct ath12k_vif {
bool ps;
struct ath12k_vif_cache *cache;
struct ath12k_rekey_data rekey_data;
+
+ /* Must be last - ends in a flexible-array member.
+ *
+ * FIXME: Driver should not copy struct ieee80211_chanctx_conf,
+ * especially because it has a flexible array. Find a better way.
+ */
+ struct ieee80211_chanctx_conf chanctx;
};
struct ath12k_vif_iter {
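The FIXME above is worth spelling out: because the embedded struct ends in a
flexible array member, sizeof() covers only its fixed header, and any
by-value copy silently drops whatever lives in the trailing array. A minimal
standalone illustration (generic names, not the driver's types):

        #include <stdio.h>

        struct ctx {
                int refcount;
                int ids[];      /* flexible array member: must be last */
        };

        int main(void)
        {
                /* sizeof() excludes ids[], so a plain struct assignment
                 * copies only the fixed part of the object. */
                printf("%zu\n", sizeof(struct ctx)); /* == sizeof(int) */
                return 0;
        }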
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
index ce80e7b5175b..f1b7e74aefe4 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
@@ -1117,6 +1117,336 @@ ath12k_htt_print_tx_tqm_pdev_stats_tlv(const void *tag_buf, u16 tag_len,
stats_req->buf_len = len;
}
+static void
+ath12k_htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "tcl2fw_entry_count = %u\n",
+ le32_to_cpu(htt_stats_buf->tcl2fw_entry_count));
+ len += scnprintf(buf + len, buf_len - len, "not_to_fw = %u\n",
+ le32_to_cpu(htt_stats_buf->not_to_fw));
+ len += scnprintf(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_pdev_vdev_peer));
+ len += scnprintf(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u\n",
+ le32_to_cpu(htt_stats_buf->tcl_res_invalid_addrx));
+ len += scnprintf(buf + len, buf_len - len, "wbm2fw_entry_count = %u\n",
+ le32_to_cpu(htt_stats_buf->wbm2fw_entry_count));
+ len += scnprintf(buf + len, buf_len - len, "invalid_pdev = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_pdev));
+ len += scnprintf(buf + len, buf_len - len, "tcl_res_addrx_timeout = %u\n",
+ le32_to_cpu(htt_stats_buf->tcl_res_addrx_timeout));
+ len += scnprintf(buf + len, buf_len - len, "invalid_vdev = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_vdev));
+ len += scnprintf(buf + len, buf_len - len, "invalid_tcl_exp_frame_desc = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_tcl_exp_frame_desc));
+ len += scnprintf(buf + len, buf_len - len, "vdev_id_mismatch_count = %u\n\n",
+ le32_to_cpu(htt_stats_buf->vdev_id_mismatch_cnt));
+
+ stats_req->buf_len = len;
+}
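All of the printers added in this hunk rely on the same accumulation idiom:
kernel scnprintf() returns the number of bytes actually written (at most
size - 1), so len can be advanced blindly without ever overrunning the
buffer. A userspace analog of that contract (hypothetical buffer sizes):

        #include <stdio.h>

        static size_t scnprintf_like(char *buf, size_t size,
                                     const char *fmt, unsigned int v)
        {
                int n;

                if (!size)
                        return 0;
                n = snprintf(buf, size, fmt, v);
                if (n < 0)
                        return 0;
                /* clamp to what was actually stored, like scnprintf() */
                return (size_t)n < size ? (size_t)n : size - 1;
        }

        int main(void)
        {
                char buf[64];
                size_t len = 0;

                len += scnprintf_like(buf + len, sizeof(buf) - len,
                                      "a = %u\n", 1);
                len += scnprintf_like(buf + len, sizeof(buf) - len,
                                      "b = %u\n", 2);
                fputs(buf, stdout);
                return 0;
        }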
+
+static void
+ath12k_htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_eapol_packets_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "m1_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->m1_packets));
+ len += scnprintf(buf + len, buf_len - len, "m2_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->m2_packets));
+ len += scnprintf(buf + len, buf_len - len, "m3_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->m3_packets));
+ len += scnprintf(buf + len, buf_len - len, "m4_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->m4_packets));
+ len += scnprintf(buf + len, buf_len - len, "g1_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->g1_packets));
+ len += scnprintf(buf + len, buf_len - len, "g2_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->g2_packets));
+ len += scnprintf(buf + len, buf_len - len, "rc4_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->rc4_packets));
+ len += scnprintf(buf + len, buf_len - len, "eap_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->eap_packets));
+ len += scnprintf(buf + len, buf_len - len, "eapol_start_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->eapol_start_packets));
+ len += scnprintf(buf + len, buf_len - len, "eapol_logoff_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->eapol_logoff_packets));
+ len += scnprintf(buf + len, buf_len - len, "eapol_encap_asf_packets = %u\n\n",
+ le32_to_cpu(htt_stats_buf->eapol_encap_asf_packets));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_classify_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_classify_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "arp_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->arp_packets));
+ len += scnprintf(buf + len, buf_len - len, "igmp_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->igmp_packets));
+ len += scnprintf(buf + len, buf_len - len, "dhcp_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->dhcp_packets));
+ len += scnprintf(buf + len, buf_len - len, "host_inspected = %u\n",
+ le32_to_cpu(htt_stats_buf->host_inspected));
+ len += scnprintf(buf + len, buf_len - len, "htt_included = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_included));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_mcs = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_mcs));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_nss = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_nss));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_preamble_type = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_preamble_type));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_chainmask = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_chainmask));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_guard_interval = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_guard_interval));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_retries = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_retries));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_bw_info = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_bw_info));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_power = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_power));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_key_flags));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_no_encryption = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_no_encryption));
+ len += scnprintf(buf + len, buf_len - len, "fse_entry_count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_entry_count));
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_be = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_priority_be));
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_high = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_priority_high));
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_low = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_priority_low));
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_be));
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_over_sub));
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_bursty));
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_interactive));
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_periodic));
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_alloc = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_hwqueue_alloc));
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_created = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_hwqueue_created));
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_hwqueue_send_to_host));
+ len += scnprintf(buf + len, buf_len - len, "mcast_entry = %u\n",
+ le32_to_cpu(htt_stats_buf->mcast_entry));
+ len += scnprintf(buf + len, buf_len - len, "bcast_entry = %u\n",
+ le32_to_cpu(htt_stats_buf->bcast_entry));
+ len += scnprintf(buf + len, buf_len - len, "htt_update_peer_cache = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_update_peer_cache));
+ len += scnprintf(buf + len, buf_len - len, "htt_learning_frame = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_learning_frame));
+ len += scnprintf(buf + len, buf_len - len, "fse_invalid_peer = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_invalid_peer));
+ len += scnprintf(buf + len, buf_len - len, "mec_notify = %u\n\n",
+ le32_to_cpu(htt_stats_buf->mec_notify));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_classify_failed_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ap_bss_peer_not_found = %u\n",
+ le32_to_cpu(htt_stats_buf->ap_bss_peer_not_found));
+ len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u\n",
+ le32_to_cpu(htt_stats_buf->ap_bcast_mcast_no_peer));
+ len += scnprintf(buf + len, buf_len - len, "sta_delete_in_progress = %u\n",
+ le32_to_cpu(htt_stats_buf->sta_delete_in_progress));
+ len += scnprintf(buf + len, buf_len - len, "ibss_no_bss_peer = %u\n",
+ le32_to_cpu(htt_stats_buf->ibss_no_bss_peer));
+ len += scnprintf(buf + len, buf_len - len, "invalid_vdev_type = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_vdev_type));
+ len += scnprintf(buf + len, buf_len - len, "invalid_ast_peer_entry = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_ast_peer_entry));
+ len += scnprintf(buf + len, buf_len - len, "peer_entry_invalid = %u\n",
+ le32_to_cpu(htt_stats_buf->peer_entry_invalid));
+ len += scnprintf(buf + len, buf_len - len, "ethertype_not_ip = %u\n",
+ le32_to_cpu(htt_stats_buf->ethertype_not_ip));
+ len += scnprintf(buf + len, buf_len - len, "eapol_lookup_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->eapol_lookup_failed));
+ len += scnprintf(buf + len, buf_len - len, "qpeer_not_allow_data = %u\n",
+ le32_to_cpu(htt_stats_buf->qpeer_not_allow_data));
+ len += scnprintf(buf + len, buf_len - len, "fse_tid_override = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_tid_override));
+ len += scnprintf(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u\n",
+ le32_to_cpu(htt_stats_buf->ipv6_jumbogram_zero_length));
+ len += scnprintf(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n",
+ le32_to_cpu(htt_stats_buf->qos_to_non_qos_in_prog));
+ len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_eapol = %u\n",
+ le32_to_cpu(htt_stats_buf->ap_bcast_mcast_eapol));
+ len += scnprintf(buf + len, buf_len - len, "unicast_on_ap_bss_peer = %u\n",
+ le32_to_cpu(htt_stats_buf->unicast_on_ap_bss_peer));
+ len += scnprintf(buf + len, buf_len - len, "ap_vdev_invalid = %u\n",
+ le32_to_cpu(htt_stats_buf->ap_vdev_invalid));
+ len += scnprintf(buf + len, buf_len - len, "incomplete_llc = %u\n",
+ le32_to_cpu(htt_stats_buf->incomplete_llc));
+ len += scnprintf(buf + len, buf_len - len, "eapol_duplicate_m3 = %u\n",
+ le32_to_cpu(htt_stats_buf->eapol_duplicate_m3));
+ len += scnprintf(buf + len, buf_len - len, "eapol_duplicate_m4 = %u\n\n",
+ le32_to_cpu(htt_stats_buf->eapol_duplicate_m4));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_classify_status_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "eok = %u\n",
+ le32_to_cpu(htt_stats_buf->eok));
+ len += scnprintf(buf + len, buf_len - len, "classify_done = %u\n",
+ le32_to_cpu(htt_stats_buf->classify_done));
+ len += scnprintf(buf + len, buf_len - len, "lookup_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->lookup_failed));
+ len += scnprintf(buf + len, buf_len - len, "send_host_dhcp = %u\n",
+ le32_to_cpu(htt_stats_buf->send_host_dhcp));
+ len += scnprintf(buf + len, buf_len - len, "send_host_mcast = %u\n",
+ le32_to_cpu(htt_stats_buf->send_host_mcast));
+ len += scnprintf(buf + len, buf_len - len, "send_host_unknown_dest = %u\n",
+ le32_to_cpu(htt_stats_buf->send_host_unknown_dest));
+ len += scnprintf(buf + len, buf_len - len, "send_host = %u\n",
+ le32_to_cpu(htt_stats_buf->send_host));
+ len += scnprintf(buf + len, buf_len - len, "status_invalid = %u\n\n",
+ le32_to_cpu(htt_stats_buf->status_invalid));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_enqueue_packets_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "enqueued_pkts = %u\n",
+ le32_to_cpu(htt_stats_buf->enqueued_pkts));
+ len += scnprintf(buf + len, buf_len - len, "to_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->to_tqm));
+ len += scnprintf(buf + len, buf_len - len, "to_tqm_bypass = %u\n\n",
+ le32_to_cpu(htt_stats_buf->to_tqm_bypass));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_enqueue_discard_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_enqueue_discard_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "discarded_pkts = %u\n",
+ le32_to_cpu(htt_stats_buf->discarded_pkts));
+ len += scnprintf(buf + len, buf_len - len, "local_frames = %u\n",
+ le32_to_cpu(htt_stats_buf->local_frames));
+ len += scnprintf(buf + len, buf_len - len, "is_ext_msdu = %u\n\n",
+ le32_to_cpu(htt_stats_buf->is_ext_msdu));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_compl_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_compl_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "tcl_dummy_frame = %u\n",
+ le32_to_cpu(htt_stats_buf->tcl_dummy_frame));
+ len += scnprintf(buf + len, buf_len - len, "tqm_dummy_frame = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_dummy_frame));
+ len += scnprintf(buf + len, buf_len - len, "tqm_notify_frame = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_notify_frame));
+ len += scnprintf(buf + len, buf_len - len, "fw2wbm_enq = %u\n",
+ le32_to_cpu(htt_stats_buf->fw2wbm_enq));
+ len += scnprintf(buf + len, buf_len - len, "tqm_bypass_frame = %u\n\n",
+ le32_to_cpu(htt_stats_buf->tqm_bypass_frame));
+
+ stats_req->buf_len = len;
+}
+
static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
u16 tag, u16 len, const void *tag_buf,
void *user_data)
@@ -1198,6 +1528,30 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
case HTT_STATS_TX_TQM_PDEV_TAG:
ath12k_htt_print_tx_tqm_pdev_stats_tlv(tag_buf, len, stats_req);
break;
+ case HTT_STATS_TX_DE_CMN_TAG:
+ ath12k_htt_print_tx_de_cmn_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_EAPOL_PACKETS_TAG:
+ ath12k_htt_print_tx_de_eapol_packets_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_CLASSIFY_STATS_TAG:
+ ath12k_htt_print_tx_de_classify_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG:
+ ath12k_htt_print_tx_de_classify_failed_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG:
+ ath12k_htt_print_tx_de_classify_status_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG:
+ ath12k_htt_print_tx_de_enqueue_packets_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG:
+ ath12k_htt_print_tx_de_enqueue_discard_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_COMPL_STATS_TAG:
+ ath12k_htt_print_tx_de_compl_stats_tlv(tag_buf, len, stats_req);
+ break;
default:
break;
}
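For context, the parser above is a per-tag dispatch driven by a generic TLV
walker; note how every handler first checks tag_len against the expected
struct size before touching the payload. A simplified walker under an
assumed 2-byte tag / 2-byte length header (the real HTT header is a packed
32-bit word, so this is illustrative only):

        #include <stdint.h>
        #include <stddef.h>
        #include <string.h>

        struct tlv_hdr {
                uint16_t tag;
                uint16_t len;
        };

        static void tlv_walk(const uint8_t *p, size_t total)
        {
                struct tlv_hdr h;

                while (total >= sizeof(h)) {
                        memcpy(&h, p, sizeof(h));
                        if (sizeof(h) + h.len > total)
                                break;  /* truncated TLV: stop parsing */
                        /* dispatch on h.tag here, as the switch above does,
                         * passing p + sizeof(h) and h.len to the handler */
                        p += sizeof(h) + h.len;
                        total -= sizeof(h) + h.len;
                }
        }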
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
index 6294a082cf8a..d52b26b23e65 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
@@ -128,6 +128,7 @@ enum ath12k_dbg_htt_ext_stats_type {
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
+ ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
/* keep this last */
ATH12K_DBG_HTT_NUM_EXT_STATS,
@@ -143,6 +144,13 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG = 13,
HTT_STATS_TX_TQM_CMN_TAG = 14,
HTT_STATS_TX_TQM_PDEV_TAG = 15,
+ HTT_STATS_TX_DE_EAPOL_PACKETS_TAG = 17,
+ HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG = 18,
+ HTT_STATS_TX_DE_CLASSIFY_STATS_TAG = 19,
+ HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG = 20,
+ HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG = 21,
+ HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG = 22,
+ HTT_STATS_TX_DE_CMN_TAG = 23,
HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36,
HTT_STATS_TX_SCHED_CMN_TAG = 37,
HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39,
@@ -150,6 +158,7 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG = 44,
HTT_STATS_HW_INTR_MISC_TAG = 54,
HTT_STATS_HW_PDEV_ERRS_TAG = 56,
+ HTT_STATS_TX_DE_COMPL_STATS_TAG = 65,
HTT_STATS_WHAL_TX_TAG = 66,
HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67,
HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
@@ -564,4 +573,121 @@ struct ath12k_htt_tx_tqm_pdev_stats_tlv {
__le32 sched_nonudp_notify2;
} __packed;
+struct ath12k_htt_tx_de_cmn_stats_tlv {
+ __le32 mac_id__word;
+ __le32 tcl2fw_entry_count;
+ __le32 not_to_fw;
+ __le32 invalid_pdev_vdev_peer;
+ __le32 tcl_res_invalid_addrx;
+ __le32 wbm2fw_entry_count;
+ __le32 invalid_pdev;
+ __le32 tcl_res_addrx_timeout;
+ __le32 invalid_vdev;
+ __le32 invalid_tcl_exp_frame_desc;
+ __le32 vdev_id_mismatch_cnt;
+} __packed;
+
+struct ath12k_htt_tx_de_eapol_packets_stats_tlv {
+ __le32 m1_packets;
+ __le32 m2_packets;
+ __le32 m3_packets;
+ __le32 m4_packets;
+ __le32 g1_packets;
+ __le32 g2_packets;
+ __le32 rc4_packets;
+ __le32 eap_packets;
+ __le32 eapol_start_packets;
+ __le32 eapol_logoff_packets;
+ __le32 eapol_encap_asf_packets;
+} __packed;
+
+struct ath12k_htt_tx_de_classify_stats_tlv {
+ __le32 arp_packets;
+ __le32 igmp_packets;
+ __le32 dhcp_packets;
+ __le32 host_inspected;
+ __le32 htt_included;
+ __le32 htt_valid_mcs;
+ __le32 htt_valid_nss;
+ __le32 htt_valid_preamble_type;
+ __le32 htt_valid_chainmask;
+ __le32 htt_valid_guard_interval;
+ __le32 htt_valid_retries;
+ __le32 htt_valid_bw_info;
+ __le32 htt_valid_power;
+ __le32 htt_valid_key_flags;
+ __le32 htt_valid_no_encryption;
+ __le32 fse_entry_count;
+ __le32 fse_priority_be;
+ __le32 fse_priority_high;
+ __le32 fse_priority_low;
+ __le32 fse_traffic_ptrn_be;
+ __le32 fse_traffic_ptrn_over_sub;
+ __le32 fse_traffic_ptrn_bursty;
+ __le32 fse_traffic_ptrn_interactive;
+ __le32 fse_traffic_ptrn_periodic;
+ __le32 fse_hwqueue_alloc;
+ __le32 fse_hwqueue_created;
+ __le32 fse_hwqueue_send_to_host;
+ __le32 mcast_entry;
+ __le32 bcast_entry;
+ __le32 htt_update_peer_cache;
+ __le32 htt_learning_frame;
+ __le32 fse_invalid_peer;
+ __le32 mec_notify;
+} __packed;
+
+struct ath12k_htt_tx_de_classify_failed_stats_tlv {
+ __le32 ap_bss_peer_not_found;
+ __le32 ap_bcast_mcast_no_peer;
+ __le32 sta_delete_in_progress;
+ __le32 ibss_no_bss_peer;
+ __le32 invalid_vdev_type;
+ __le32 invalid_ast_peer_entry;
+ __le32 peer_entry_invalid;
+ __le32 ethertype_not_ip;
+ __le32 eapol_lookup_failed;
+ __le32 qpeer_not_allow_data;
+ __le32 fse_tid_override;
+ __le32 ipv6_jumbogram_zero_length;
+ __le32 qos_to_non_qos_in_prog;
+ __le32 ap_bcast_mcast_eapol;
+ __le32 unicast_on_ap_bss_peer;
+ __le32 ap_vdev_invalid;
+ __le32 incomplete_llc;
+ __le32 eapol_duplicate_m3;
+ __le32 eapol_duplicate_m4;
+} __packed;
+
+struct ath12k_htt_tx_de_classify_status_stats_tlv {
+ __le32 eok;
+ __le32 classify_done;
+ __le32 lookup_failed;
+ __le32 send_host_dhcp;
+ __le32 send_host_mcast;
+ __le32 send_host_unknown_dest;
+ __le32 send_host;
+ __le32 status_invalid;
+} __packed;
+
+struct ath12k_htt_tx_de_enqueue_packets_stats_tlv {
+ __le32 enqueued_pkts;
+ __le32 to_tqm;
+ __le32 to_tqm_bypass;
+} __packed;
+
+struct ath12k_htt_tx_de_enqueue_discard_stats_tlv {
+ __le32 discarded_pkts;
+ __le32 local_frames;
+ __le32 is_ext_msdu;
+} __packed;
+
+struct ath12k_htt_tx_de_compl_stats_tlv {
+ __le32 tcl_dummy_frame;
+ __le32 tqm_dummy_frame;
+ __le32 tqm_notify_frame;
+ __le32 fw2wbm_enq;
+ __le32 tqm_bypass_frame;
+} __packed;
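These TLV mirrors are declared with __le32 fields plus __packed because the
firmware lays them out as unpadded little-endian words; le32_to_cpu() in the
printers above is then a byte swap only on big-endian hosts. A portable
userspace equivalent of that load, for reference:

        #include <stdint.h>

        static uint32_t le32_load(const uint8_t b[4])
        {
                /* assemble LSB-first regardless of host endianness */
                return (uint32_t)b[0]       | (uint32_t)b[1] << 8 |
                       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
        }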
+
#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index b77497c14ac4..2fb18b83b3ee 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -1495,18 +1495,6 @@ struct htt_ppdu_stats_user_rate {
#define HTT_TX_INFO_PEERID(_flags) \
u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M)
-struct htt_tx_ppdu_stats_info {
- struct htt_tlv tlv_hdr;
- __le32 tx_success_bytes;
- __le32 tx_retry_bytes;
- __le32 tx_failed_bytes;
- __le32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
- __le16 tx_success_msdus;
- __le16 tx_retry_msdus;
- __le16 tx_failed_msdus;
- __le16 tx_duration; /* units in us */
-} __packed;
-
enum htt_ppdu_stats_usr_compln_status {
HTT_PPDU_STATS_USER_STATUS_OK,
HTT_PPDU_STATS_USER_STATUS_FILTERED,
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 14236d0a0c89..91e3393f7b5f 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -2681,7 +2681,7 @@ try_again:
if (push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
dev_kfree_skb_any(msdu);
- ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
+ ab->soc_stats.hal_reo_error[ring_id]++;
continue;
}
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index d08c04343e90..44406e0b4a34 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -162,6 +162,60 @@ static int ath12k_dp_prepare_htt_metadata(struct sk_buff *skb)
return 0;
}
+static void ath12k_dp_tx_move_payload(struct sk_buff *skb,
+ unsigned long delta,
+ bool head)
+{
+ unsigned long len = skb->len;
+
+ if (head) {
+ skb_push(skb, delta);
+ memmove(skb->data, skb->data + delta, len);
+ skb_trim(skb, len);
+ } else {
+ skb_put(skb, delta);
+ memmove(skb->data + delta, skb->data, len);
+ skb_pull(skb, delta);
+ }
+}
+
+static int ath12k_dp_tx_align_payload(struct ath12k_base *ab,
+ struct sk_buff **pskb)
+{
+ u32 iova_mask = ab->hw_params->iova_mask;
+ unsigned long offset, delta1, delta2;
+ struct sk_buff *skb2, *skb = *pskb;
+ unsigned int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+ int ret = 0;
+
+ offset = (unsigned long)skb->data & iova_mask;
+ delta1 = offset;
+ delta2 = iova_mask - offset + 1;
+
+ if (headroom >= delta1) {
+ ath12k_dp_tx_move_payload(skb, delta1, true);
+ } else if (tailroom >= delta2) {
+ ath12k_dp_tx_move_payload(skb, delta2, false);
+ } else {
+ skb2 = skb_realloc_headroom(skb, iova_mask);
+ if (!skb2) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dev_kfree_skb_any(skb);
+
+ offset = (unsigned long)skb2->data & iova_mask;
+ if (offset)
+ ath12k_dp_tx_move_payload(skb2, offset, true);
+ *pskb = skb2;
+ }
+
+out:
+ return ret;
+}
+
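The arithmetic above computes two candidate shifts: delta1 pulls the payload
left into headroom, delta2 pushes it right into tailroom, and either one
lands skb->data on an (iova_mask + 1)-byte boundary. A worked standalone
example with the WCN7850 mask (ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1 = 127, per
the hw.c hunk below; the address is made up):

        #include <stdio.h>

        int main(void)
        {
                unsigned long iova_mask = 128 - 1;       /* 128-byte unit */
                unsigned long addr = 0x1045;             /* skb->data     */
                unsigned long offset = addr & iova_mask; /* 0x45 = 69     */
                unsigned long delta1 = offset;                  /* left  */
                unsigned long delta2 = iova_mask - offset + 1;  /* right */

                printf("need %lu bytes headroom or %lu bytes tailroom\n",
                       delta1, delta2);   /* 69 or 59 */
                return 0;
        }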
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
struct sk_buff *skb)
{
@@ -184,6 +238,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
bool tcl_ring_retry;
bool msdu_ext_desc = false;
bool add_htt_metadata = false;
+ u32 iova_mask = ab->hw_params->iova_mask;
if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
@@ -279,6 +334,23 @@ tcl_ring_sel:
goto fail_remove_tx_buf;
}
+ if (iova_mask &&
+ (unsigned long)skb->data & iova_mask) {
+ ret = ath12k_dp_tx_align_payload(ab, &skb);
+ if (ret) {
+ ath12k_warn(ab, "failed to align TX buffer %d\n", ret);
+ /* don't bail out; give the original buffer
+ * a chance even if it is unaligned.
+ */
+ goto map;
+ }
+
+ /* hdr points to the wrong place after the payload
+ * move, so refresh it for later use.
+ */
+ hdr = (void *)skb->data;
+ }
+map:
ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, ti.paddr)) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index 2e11ea763574..ec1bda95e555 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -924,6 +924,10 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.acpi_guid = NULL,
.supports_dynamic_smps_6ghz = true,
+
+ .iova_mask = 0,
+
+ .supports_aspm = false,
},
{
.name = "wcn7850 hw2.0",
@@ -1000,6 +1004,10 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.acpi_guid = &wcn7850_uuid,
.supports_dynamic_smps_6ghz = false,
+
+ .iova_mask = ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1,
+
+ .supports_aspm = true,
},
{
.name = "qcn9274 hw2.0",
@@ -1072,6 +1080,10 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.acpi_guid = NULL,
.supports_dynamic_smps_6ghz = true,
+
+ .iova_mask = 0,
+
+ .supports_aspm = false,
},
};
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index e792eb6b249b..8d52182e28ae 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -96,6 +96,8 @@
#define ATH12K_M3_FILE "m3.bin"
#define ATH12K_REGDB_FILE_NAME "regdb.bin"
+#define ATH12K_PCIE_MAX_PAYLOAD_SIZE 128
+
enum ath12k_hw_rate_cck {
ATH12K_HW_RATE_CCK_LP_11M = 0,
ATH12K_HW_RATE_CCK_LP_5_5M,
@@ -187,6 +189,7 @@ struct ath12k_hw_params {
bool tcl_ring_retry:1;
bool reoq_lut_support:1;
bool supports_shadow_regs:1;
+ bool supports_aspm:1;
u32 num_tcl_banks;
u32 max_tx_ring;
@@ -215,6 +218,8 @@ struct ath12k_hw_params {
const guid_t *acpi_guid;
bool supports_dynamic_smps_6ghz;
+
+ u32 iova_mask;
};
struct ath12k_hw_ops {
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 8106297f0bc1..137394c36460 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -2196,9 +2196,8 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
* request, then use MAX_AMPDU_LEN_FACTOR as 16 to calculate max_ampdu
* length.
*/
- ampdu_factor = (he_cap->he_cap_elem.mac_cap_info[3] &
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) >>
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK;
+ ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3],
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
if (ampdu_factor) {
if (sta->deflink.vht_cap.vht_supported)
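The hunk above fixes a real bug, not just style: the removed expression
shifted right by the mask's *value* rather than extracting the field.
u8_get_bits() from linux/bitfield.h masks and then shifts down by the
position of the mask's lowest set bit, roughly as in this portable sketch
(mask must be non-zero):

        #include <stdint.h>

        static uint8_t get_bits8(uint8_t val, uint8_t mask)
        {
                unsigned int shift = 0;

                /* find the mask's lowest set bit */
                while (!((mask >> shift) & 1))
                        shift++;
                return (uint8_t)((val & mask) >> shift);
        }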
@@ -3664,7 +3663,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
struct ath12k *ar, *prev_ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct cfg80211_scan_request *req = &hw_req->req;
- struct ath12k_wmi_scan_req_arg arg = {};
+ struct ath12k_wmi_scan_req_arg *arg = NULL;
int ret;
int i;
bool create = true;
@@ -3746,42 +3745,47 @@ scan:
if (ret)
goto exit;
- ath12k_wmi_start_scan_init(ar, &arg);
- arg.vdev_id = arvif->vdev_id;
- arg.scan_id = ATH12K_SCAN_ID;
+ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
+ if (!arg) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ath12k_wmi_start_scan_init(ar, arg);
+ arg->vdev_id = arvif->vdev_id;
+ arg->scan_id = ATH12K_SCAN_ID;
if (req->ie_len) {
- arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
- if (!arg.extraie.ptr) {
+ arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
+ if (!arg->extraie.ptr) {
ret = -ENOMEM;
goto exit;
}
- arg.extraie.len = req->ie_len;
+ arg->extraie.len = req->ie_len;
}
if (req->n_ssids) {
- arg.num_ssids = req->n_ssids;
- for (i = 0; i < arg.num_ssids; i++)
- arg.ssid[i] = req->ssids[i];
+ arg->num_ssids = req->n_ssids;
+ for (i = 0; i < arg->num_ssids; i++)
+ arg->ssid[i] = req->ssids[i];
} else {
- arg.scan_f_passive = 1;
+ arg->scan_f_passive = 1;
}
if (req->n_channels) {
- arg.num_chan = req->n_channels;
- arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
- GFP_KERNEL);
-
- if (!arg.chan_list) {
+ arg->num_chan = req->n_channels;
+ arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list),
+ GFP_KERNEL);
+ if (!arg->chan_list) {
ret = -ENOMEM;
goto exit;
}
- for (i = 0; i < arg.num_chan; i++)
- arg.chan_list[i] = req->channels[i]->center_freq;
+ for (i = 0; i < arg->num_chan; i++)
+ arg->chan_list[i] = req->channels[i]->center_freq;
}
- ret = ath12k_start_scan(ar, &arg);
+ ret = ath12k_start_scan(ar, arg);
if (ret) {
ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
@@ -3791,14 +3795,15 @@ scan:
/* Add a margin to account for event/command processing */
ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout,
- msecs_to_jiffies(arg.max_scan_time +
+ msecs_to_jiffies(arg->max_scan_time +
ATH12K_MAC_SCAN_TIMEOUT_MSECS));
exit:
- kfree(arg.chan_list);
-
- if (req->ie_len)
- kfree(arg.extraie.ptr);
+ if (arg) {
+ kfree(arg->chan_list);
+ kfree(arg->extraie.ptr);
+ kfree(arg);
+ }
mutex_unlock(&ar->conf_mutex);
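Moving the scan request argument from the stack to kzalloc() keeps the op's
stack frame small (the struct carries per-SSID and extra-IE state), and the
unified exit path works because kfree(NULL) is a no-op, so the sub-buffers
can be freed unconditionally. The shape of the pattern, with stand-in names:

        /* kernel-style sketch; "big_arg" and "sub_buf" are illustrative */
        struct big_arg *arg = kzalloc(sizeof(*arg), GFP_KERNEL);

        if (!arg)
                return -ENOMEM;
        /* ... fill arg, submit ... */
        kfree(arg->sub_buf);    /* kfree(NULL) is a no-op */
        kfree(arg);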
@@ -9193,6 +9198,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
hw->vif_data_size = sizeof(struct ath12k_vif);
hw->sta_data_size = sizeof(struct ath12k_sta);
+ hw->extra_tx_headroom = ab->hw_params->iova_mask;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index 9e0b9e329bda..bd269aa1740b 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -954,7 +954,8 @@ static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab)
static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
{
- if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
+ if (ab_pci->ab->hw_params->supports_aspm &&
+ test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPMC,
ab_pci->link_ctl &
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 9f6be557365e..2cd3ff9b0164 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -1538,6 +1538,7 @@ int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
sizeof(*cmd));
cmd->req_type = cpu_to_le32(type);
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI bss chan info req type %d\n", type);
@@ -6788,7 +6789,7 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
if (ar->dfs_block_radar_events)
ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ath12k_ar_to_hw(ar));
+ ieee80211_radar_detected(ath12k_ar_to_hw(ar), NULL);
exit:
rcu_read_unlock();
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index f1f52175a52b..6a913f9b8315 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -3121,6 +3121,7 @@ struct wmi_pdev_bss_chan_info_req_cmd {
__le32 tlv_header;
/* ref wmi_bss_chan_info_req_type */
__le32 req_type;
+ __le32 pdev_id;
} __packed;
struct wmi_ap_ps_peer_cmd {
@@ -4085,7 +4086,6 @@ struct wmi_vdev_stopped_event {
} __packed;
struct wmi_pdev_bss_chan_info_event {
- __le32 pdev_id;
__le32 freq; /* Units in MHz */
__le32 noise_floor; /* units are dBm */
/* rx clear - how often the channel was unused */
@@ -4103,6 +4103,7 @@ struct wmi_pdev_bss_chan_info_event {
/*rx_cycle cnt for my bss in 64bits format */
__le32 rx_bss_cycle_count_low;
__le32 rx_bss_cycle_count_high;
+ __le32 pdev_id;
} __packed;
#define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index abe41330fb69..4d88b02ffa79 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -59,7 +59,7 @@
#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/mac80211.h>
#include "base.h"
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index eea4bda77608..d81b2ad0b095 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -44,7 +44,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <net/mac80211.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "ath5k.h"
#include "base.h"
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 3f4ce4e9c532..90e0859a8e50 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -24,7 +24,7 @@
* Protocol Control Unit Functions *
\*********************************/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "ath5k.h"
#include "reg.h"
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 7ee4e1616f45..4825f9cb9cb8 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -27,7 +27,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/sort.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "ath5k.h"
#include "reg.h"
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 9fdb5283b39c..c67f163c0858 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -25,7 +25,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/pci.h> /* To determine if a card is pci-e */
#include <linux/log2.h>
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index fb5144e2d86c..f8a94d764be6 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -21,7 +21,7 @@
#include "hif-ops.h"
#include "trace.h"
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 944f46cdf34c..73c38a6b4880 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -14,7 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/kernel.h>
#include "hw.h"
#include "ar9003_phy.h"
diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
index a5eb43f30320..004ca5f536be 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
+++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
@@ -65,7 +65,7 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data,
dev_info(&pdev->dev, "fixup device configuration\n");
- mem = pcim_iomap(pdev, 0, 0);
+ mem = pci_iomap(pdev, 0, 0);
if (!mem) {
dev_err(&pdev->dev, "ioremap error\n");
return -EINVAL;
@@ -103,7 +103,7 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data,
pci_write_config_word(pdev, PCI_COMMAND, cmd);
pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, bar0);
- pcim_iounmap(pdev, mem);
+ pci_iounmap(pdev, mem);
pci_disable_device(pdev);
@@ -200,11 +200,9 @@ static int owl_probe(struct pci_dev *pdev,
const char *eeprom_name;
int err = 0;
- if (pcim_enable_device(pdev))
+ if (pci_enable_device(pdev))
return -EIO;
- pcim_pin_device(pdev);
-
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index fb270df75eb2..4b331c85509c 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -32,11 +32,8 @@ static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
for (i = 0; i < ATH9K_NF_CAL_HIST_MAX - 1; i++) {
for (j = 1; j < ATH9K_NF_CAL_HIST_MAX - i; j++) {
- if (sort[j] > sort[j - 1]) {
- nfval = sort[j];
- sort[j] = sort[j - 1];
- sort[j - 1] = nfval;
- }
+ if (sort[j] > sort[j - 1])
+ swap(sort[j], sort[j - 1]);
}
}
nfval = sort[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];
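swap() replaces the open-coded three-assignment exchange inside this sort
loop without changing behavior; the kernel macro is approximately:

        /* from linux/minmax.h (approximate) */
        #define swap(a, b) \
                do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)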
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index d84e3ee7b5d9..eff894958a73 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -17,7 +17,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "ath9k.h"
@@ -1325,11 +1325,11 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
int i = 0;
- data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_pkts_all +
+ data[i++] = ((u64)sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_pkts_all +
sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_pkts_all +
sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_pkts_all +
sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_pkts_all);
- data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_bytes_all +
+ data[i++] = ((u64)sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_bytes_all +
sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_bytes_all +
sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_bytes_all +
sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_bytes_all);
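The added (u64) cast on the first operand matters: without it, the four u32
counters are summed in 32-bit arithmetic and can wrap before being widened
into the u64 ethtool slot. A standalone demonstration:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t a = 0xC0000000, b = 0xC0000000;
                uint64_t wrong = a + b;           /* 32-bit add wraps first */
                uint64_t right = (uint64_t)a + b; /* promoted before adding */

                printf("%llu %llu\n", (unsigned long long)wrong,
                       (unsigned long long)right); /* 2147483648 6442450944 */
                return 0;
        }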
@@ -1380,8 +1380,6 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy = debugfs_create_dir("ath9k",
sc->hw->wiphy->debugfsdir);
- if (IS_ERR(sc->debug.debugfs_phy))
- return -ENOMEM;
#ifdef CONFIG_ATH_DEBUG
debugfs_create_file("debug", 0600, sc->debug.debugfs_phy,
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 11349218bc21..3689e12db9f7 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -280,7 +280,7 @@ ath9k_dfs_process_radar_pulse(struct ath_softc *sc, struct pulse_event *pe)
if (!pd->add_pulse(pd, pe, NULL))
return;
DFS_STAT_INC(sc, radar_detected);
- ieee80211_radar_detected(sc->hw);
+ ieee80211_radar_detected(sc->hw, NULL);
}
/*
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index 8e18e9b4ef48..426caa057396 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -116,7 +116,7 @@ static ssize_t write_file_simulate_radar(struct file *file,
{
struct ath_softc *sc = file->private_data;
- ieee80211_radar_detected(sc->hw);
+ ieee80211_radar_detected(sc->hw, NULL);
return count;
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 27b860b0c769..3e16cfe059f3 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -14,7 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "hw.h"
#include "ar9002_phy.h"
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index d85472ee4d85..c139ac49ccf6 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -14,7 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "hw.h"
#include "ar9002_phy.h"
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 84b31caf8ca6..5ba467cb7425 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -14,7 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "hw.h"
#include "ar9002_phy.h"
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 0c7841f95228..7265766cddbd 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -14,7 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "htc.h"
MODULE_FIRMWARE(HTC_7010_MODULE_FW);
@@ -716,8 +716,7 @@ static void ath9k_hif_usb_rx_cb(struct urb *urb)
}
resubmit:
- skb_reset_tail_pointer(skb);
- skb_trim(skb, 0);
+ __skb_set_length(skb, 0);
usb_anchor_urb(urb, &hif_dev->rx_submitted);
ret = usb_submit_urb(urb, GFP_ATOMIC);
@@ -754,8 +753,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
case -ESHUTDOWN:
goto free_skb;
default:
- skb_reset_tail_pointer(skb);
- skb_trim(skb, 0);
+ __skb_set_length(skb, 0);
goto resubmit;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index f7c6d9bc9311..9437d69877cc 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -486,8 +486,6 @@ int ath9k_htc_init_debug(struct ath_hw *ah)
priv->debug.debugfs_phy = debugfs_create_dir(KBUILD_MODNAME,
priv->hw->wiphy->debugfsdir);
- if (IS_ERR(priv->debug.debugfs_phy))
- return -ENOMEM;
ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 5982e0db45f9..c3a6368bfc68 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -21,7 +21,7 @@
#include <linux/bitops.h>
#include <linux/etherdevice.h>
#include <linux/gpio.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "hw.h"
#include "hw-ops.h"
@@ -2732,7 +2732,7 @@ static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out,
if (ah->caps.gpio_requested & BIT(gpio))
return;
- err = gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label);
+ err = devm_gpio_request_one(ah->dev, gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label);
if (err) {
ath_err(ath9k_hw_common(ah), "request GPIO%d failed:%d\n",
gpio, err);
@@ -2801,10 +2801,8 @@ void ath9k_hw_gpio_free(struct ath_hw *ah, u32 gpio)
WARN_ON(gpio >= ah->caps.num_gpio_pins);
- if (ah->caps.gpio_requested & BIT(gpio)) {
- gpio_free(gpio);
+ if (ah->caps.gpio_requested & BIT(gpio))
ah->caps.gpio_requested &= ~BIT(gpio);
- }
}
EXPORT_SYMBOL(ath9k_hw_gpio_free);
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index 6cdbee5beb07..20ceed0dd4be 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -36,7 +36,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "carl9170.h"
#include "cmd.h"
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index 85955572a705..b301e6fbce6c 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -15,7 +15,7 @@
*/
#include <linux/export.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "ath.h"
#include "reg.h"
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 21a93fec284d..0ae436bd9b66 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -16,7 +16,7 @@
*/
#include <linux/export.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/mac80211.h>
#include "ath.h"
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 8e56dcf9309d..25b4ef9d3c9a 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -30,7 +30,7 @@
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "b43.h"
#include "main.h"
diff --git a/drivers/net/wireless/broadcom/b43/tables_lpphy.c b/drivers/net/wireless/broadcom/b43/tables_lpphy.c
index 71a7cd8dc787..fb70a1e3544b 100644
--- a/drivers/net/wireless/broadcom/b43/tables_lpphy.c
+++ b/drivers/net/wireless/broadcom/b43/tables_lpphy.c
@@ -1066,7 +1066,7 @@ static const u32 lpphy_papd_mult_table[] = {
0x00036963, 0x000339f2, 0x00030a89, 0x0002db28,
};
-static struct lpphy_tx_gain_table_entry lpphy_rev0_nopa_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev0_nopa_tx_gain_table[] = {
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 152, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 147, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 143, },
@@ -1197,7 +1197,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev0_nopa_tx_gain_table[] = {
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 71, },
};
-static struct lpphy_tx_gain_table_entry lpphy_rev0_2ghz_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev0_2ghz_tx_gain_table[] = {
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 60, },
@@ -1328,7 +1328,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev0_2ghz_tx_gain_table[] = {
{ .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 72, },
};
-static struct lpphy_tx_gain_table_entry lpphy_rev0_5ghz_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev0_5ghz_tx_gain_table[] = {
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 99, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 96, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 93, },
@@ -1459,7 +1459,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev0_5ghz_tx_gain_table[] = {
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 60, },
};
-static struct lpphy_tx_gain_table_entry lpphy_rev1_nopa_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev1_nopa_tx_gain_table[] = {
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 152, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 147, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 143, },
@@ -1599,7 +1599,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev1_nopa_tx_gain_table[] = {
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 71, },
};
-static struct lpphy_tx_gain_table_entry lpphy_rev1_2ghz_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev1_2ghz_tx_gain_table[] = {
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 90, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 88, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 85, },
@@ -1730,7 +1730,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev1_2ghz_tx_gain_table[] = {
{ .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 60, },
};
-static struct lpphy_tx_gain_table_entry lpphy_rev1_5ghz_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev1_5ghz_tx_gain_table[] = {
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 99, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 96, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 93, },
@@ -1861,7 +1861,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev1_5ghz_tx_gain_table[] = {
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 60, },
};
-static struct lpphy_tx_gain_table_entry lpphy_rev2_nopa_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev2_nopa_tx_gain_table[] = {
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 152, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 147, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 143, },
@@ -1992,7 +1992,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev2_nopa_tx_gain_table[] = {
{ .gm = 255, .pga = 111, .pad = 29, .dac = 0, .bb_mult = 64, },
};
-static struct lpphy_tx_gain_table_entry lpphy_rev2_2ghz_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev2_2ghz_tx_gain_table[] = {
{ .gm = 7, .pga = 99, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 96, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 93, .pad = 255, .dac = 0, .bb_mult = 64, },
@@ -2123,7 +2123,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev2_2ghz_tx_gain_table[] = {
{ .gm = 7, .pga = 13, .pad = 52, .dac = 0, .bb_mult = 64, },
};
-static struct lpphy_tx_gain_table_entry lpphy_rev2_5ghz_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev2_5ghz_tx_gain_table[] = {
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 152, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 147, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 143, },
@@ -2392,7 +2392,7 @@ void lpphy_write_gain_table(struct b43_wldev *dev, int offset,
}
void lpphy_write_gain_table_bulk(struct b43_wldev *dev, int offset, int count,
- struct lpphy_tx_gain_table_entry *table)
+ const struct lpphy_tx_gain_table_entry *table)
{
int i;
diff --git a/drivers/net/wireless/broadcom/b43/tables_lpphy.h b/drivers/net/wireless/broadcom/b43/tables_lpphy.h
index 62002098bbda..1971ccccabf8 100644
--- a/drivers/net/wireless/broadcom/b43/tables_lpphy.h
+++ b/drivers/net/wireless/broadcom/b43/tables_lpphy.h
@@ -36,7 +36,7 @@ struct lpphy_tx_gain_table_entry {
void lpphy_write_gain_table(struct b43_wldev *dev, int offset,
struct lpphy_tx_gain_table_entry data);
void lpphy_write_gain_table_bulk(struct b43_wldev *dev, int offset, int count,
- struct lpphy_tx_gain_table_entry *table);
+ const struct lpphy_tx_gain_table_entry *table);
void lpphy_rev0_1_table_init(struct b43_wldev *dev);
void lpphy_rev2plus_table_init(struct b43_wldev *dev);
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 441d6440671b..2370a2e6a2e3 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -27,7 +27,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <net/dst.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "b43legacy.h"
#include "main.h"
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
index 0c3d119d1219..1e8495f50c16 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
@@ -123,7 +123,7 @@ static s32 brcmf_btcoex_params_read(struct brcmf_if *ifp, u32 addr, u32 *data)
{
*data = addr;
- return brcmf_fil_iovar_int_get(ifp, "btc_params", data);
+ return brcmf_fil_iovar_int_query(ifp, "btc_params", data);
}
/**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 1585a5653ee4..349aa3439502 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -663,8 +663,8 @@ static int brcmf_cfg80211_request_sta_if(struct brcmf_if *ifp, u8 *macaddr)
/* interface_create version 3+ */
/* get supported version from firmware side */
iface_create_ver = 0;
- err = brcmf_fil_bsscfg_int_get(ifp, "interface_create",
- &iface_create_ver);
+ err = brcmf_fil_bsscfg_int_query(ifp, "interface_create",
+ &iface_create_ver);
if (err) {
brcmf_err("fail to get supported version, err=%d\n", err);
return -EOPNOTSUPP;
@@ -756,8 +756,8 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
/* interface_create version 3+ */
/* get supported version from firmware side */
iface_create_ver = 0;
- err = brcmf_fil_bsscfg_int_get(ifp, "interface_create",
- &iface_create_ver);
+ err = brcmf_fil_bsscfg_int_query(ifp, "interface_create",
+ &iface_create_ver);
if (err) {
brcmf_err("fail to get supported version, err=%d\n", err);
return -EOPNOTSUPP;
@@ -1135,7 +1135,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
offset = offsetof(struct brcmf_scan_params_v2_le, channel_list) +
n_channels * sizeof(u16);
offset = roundup(offset, sizeof(u32));
- length += sizeof(ssid_le) * n_ssids,
+ length += sizeof(ssid_le) * n_ssids;
ptr = (char *)params_le + offset;
for (i = 0; i < n_ssids; i++) {
memset(&ssid_le, 0, sizeof(ssid_le));
@@ -2101,7 +2101,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
if (!sme->crypto.n_akm_suites)
return 0;
- err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), "wpa_auth", &val);
+ err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev),
+ "wpa_auth", &val);
if (err) {
bphy_err(drvr, "could not get wpa_auth (%d)\n", err);
return err;
@@ -2680,7 +2681,7 @@ brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_cfg80211_vif *vif = wdev_to_vif(wdev);
struct brcmf_pub *drvr = cfg->pub;
- s32 qdbm = 0;
+ s32 qdbm;
s32 err;
brcmf_dbg(TRACE, "Enter\n");
@@ -3067,7 +3068,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
struct brcmf_scb_val_le scbval;
struct brcmf_pktcnt_le pktcnt;
s32 err;
- u32 rate = 0;
+ u32 rate;
u32 rssi;
/* Get the current tx rate */
@@ -4320,9 +4321,16 @@ brcmf_pmksa_v3_op(struct brcmf_if *ifp, struct cfg80211_pmksa *pmksa,
/* Single PMK operation */
pmk_op->count = cpu_to_le16(1);
length += sizeof(struct brcmf_pmksa_v3);
- memcpy(pmk_op->pmk[0].bssid, pmksa->bssid, ETH_ALEN);
- memcpy(pmk_op->pmk[0].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
- pmk_op->pmk[0].pmkid_len = WLAN_PMKID_LEN;
+ if (pmksa->bssid)
+ memcpy(pmk_op->pmk[0].bssid, pmksa->bssid, ETH_ALEN);
+ if (pmksa->pmkid) {
+ memcpy(pmk_op->pmk[0].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
+ pmk_op->pmk[0].pmkid_len = WLAN_PMKID_LEN;
+ }
+ if (pmksa->ssid && pmksa->ssid_len) {
+ memcpy(pmk_op->pmk[0].ssid.SSID, pmksa->ssid, pmksa->ssid_len);
+ pmk_op->pmk[0].ssid.SSID_len = pmksa->ssid_len;
+ }
pmk_op->pmk[0].time_left = cpu_to_le32(alive ? BRCMF_PMKSA_NO_EXPIRY : 0);
}
@@ -7039,8 +7047,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
ch.bw = BRCMU_CHAN_BW_20;
cfg->d11inf.encchspec(&ch);
chaninfo = ch.chspec;
- err = brcmf_fil_bsscfg_int_get(ifp, "per_chan_info",
- &chaninfo);
+ err = brcmf_fil_bsscfg_int_query(ifp, "per_chan_info",
+ &chaninfo);
if (!err) {
if (chaninfo & WL_CHAN_RADAR)
channel->flags |=
@@ -7074,7 +7082,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
/* verify support for bw_cap command */
val = WLC_BAND_5G;
- err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &val);
+ err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &val);
if (!err) {
/* only set 2G bandwidth using bw_cap command */
@@ -7150,11 +7158,11 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
int err;
band = WLC_BAND_2G;
- err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
+ err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band);
if (!err) {
bw_cap[NL80211_BAND_2GHZ] = band;
band = WLC_BAND_5G;
- err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
+ err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band);
if (!err) {
bw_cap[NL80211_BAND_5GHZ] = band;
return;
@@ -7163,7 +7171,6 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
return;
}
brcmf_dbg(INFO, "fallback to mimo_bw_cap info\n");
- mimo_bwcap = 0;
err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &mimo_bwcap);
if (err)
/* assume 20MHz if firmware does not give a clue */
@@ -7259,10 +7266,10 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
struct brcmf_pub *drvr = cfg->pub;
struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
struct wiphy *wiphy = cfg_to_wiphy(cfg);
- u32 nmode = 0;
+ u32 nmode;
u32 vhtmode = 0;
u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT };
- u32 rxchain = 0;
+ u32 rxchain;
u32 nchain;
int err;
s32 i;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index bf91b1e1368f..da72fd2d541f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -691,7 +691,7 @@ static int brcmf_net_mon_open(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
- u32 monitor = 0;
+ u32 monitor;
int err;
brcmf_dbg(TRACE, "Enter\n");
@@ -1184,7 +1184,6 @@ static ssize_t bus_reset_write(struct file *file, const char __user *user_buf,
static const struct file_operations bus_reset_fops = {
.open = simple_open,
- .llseek = no_llseek,
.write = bus_reset_write,
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index ea76b8d33401..39226b9c0fa8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -48,20 +48,20 @@
/**
* struct brcmf_ampdu_rx_reorder - AMPDU receive reorder info
*
- * @pktslots: dynamic allocated array for ordering AMPDU packets.
* @flow_id: AMPDU flow identifier.
* @cur_idx: last AMPDU index from firmware.
* @exp_idx: expected next AMPDU index.
* @max_idx: maximum amount of packets per AMPDU.
* @pend_pkts: number of packets currently in @pktslots.
+ * @pktslots: array for ordering AMPDU packets.
*/
struct brcmf_ampdu_rx_reorder {
- struct sk_buff **pktslots;
u8 flow_id;
u8 cur_idx;
u8 exp_idx;
u8 max_idx;
u8 pend_pkts;
+ struct sk_buff *pktslots[];
};
/* Forward decls for struct brcmf_pub (see below) */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index f23310a77a5d..0d9ae197fa1e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -184,7 +184,7 @@ static void brcmf_feat_wlc_version_overrides(struct brcmf_pub *drv)
static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
enum brcmf_feat_id id, char *name)
{
- u32 data = 0;
+ u32 data;
int err;
/* we need to know firmware error */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
index 9ca1b2aadcb5..eed439b84010 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
@@ -7,7 +7,7 @@
#ifndef FWEH_H_
#define FWEH_H_
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/if.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index a315a7fac6a0..31e080e4da66 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -96,15 +96,22 @@ static inline
s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
{
s32 err;
- __le32 data_le = cpu_to_le32(*data);
- err = brcmf_fil_cmd_data_get(ifp, cmd, &data_le, sizeof(data_le));
+ err = brcmf_fil_cmd_data_get(ifp, cmd, data, sizeof(*data));
if (err == 0)
- *data = le32_to_cpu(data_le);
+ *data = le32_to_cpu(*(__le32 *)data);
brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
return err;
}
+static inline
+s32 brcmf_fil_cmd_int_query(struct brcmf_if *ifp, u32 cmd, u32 *data)
+{
+ __le32 *data_le = (__le32 *)data;
+
+ *data_le = cpu_to_le32(*data);
+ return brcmf_fil_cmd_int_get(ifp, cmd, data);
+}
s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name,
const void *data, u32 len);
@@ -120,14 +127,21 @@ s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
static inline
s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
{
- __le32 data_le = cpu_to_le32(*data);
s32 err;
- err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
+ err = brcmf_fil_iovar_data_get(ifp, name, data, sizeof(*data));
if (err == 0)
- *data = le32_to_cpu(data_le);
+ *data = le32_to_cpu(*(__le32 *)data);
return err;
}
+static inline
+s32 brcmf_fil_iovar_int_query(struct brcmf_if *ifp, const char *name, u32 *data)
+{
+ __le32 *data_le = (__le32 *)data;
+
+ *data_le = cpu_to_le32(*data);
+ return brcmf_fil_iovar_int_get(ifp, name, data);
+}
s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
@@ -145,15 +159,21 @@ s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
static inline
s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
{
- __le32 data_le = cpu_to_le32(*data);
s32 err;
- err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
- sizeof(data_le));
+ err = brcmf_fil_bsscfg_data_get(ifp, name, data, sizeof(*data));
if (err == 0)
- *data = le32_to_cpu(data_le);
+ *data = le32_to_cpu(*(__le32 *)data);
return err;
}
+static inline
+s32 brcmf_fil_bsscfg_int_query(struct brcmf_if *ifp, const char *name, u32 *data)
+{
+ __le32 *data_le = (__le32 *)data;
+
+ *data_le = cpu_to_le32(*data);
+ return brcmf_fil_bsscfg_int_get(ifp, name, data);
+}
s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
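The *_int_get()/*_int_query() split above separates two firmware-access
patterns: plain "get" now reads straight into the caller's buffer and
byte-swaps in place, while "query" keeps the old seed-the-buffer
behaviour for iovars that consume an input selector on the way in
(btc_params, bw_cap, per_chan_info, interface_create in the earlier
hunks). A minimal userspace sketch of the idea; fake_fw_read() is a
hypothetical stand-in for brcmf_fil_*_data_get(), not a kernel call:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pretend firmware: answers every request with little-endian 42. */
static int fake_fw_read(const char *name, void *buf, size_t len)
{
	uint32_t answer = htole32(42);

	(void)name;
	memcpy(buf, &answer, len < sizeof(answer) ? len : sizeof(answer));
	return 0;
}

/* "get": read a little-endian u32 back and decode it in place. */
static int int_get(const char *name, uint32_t *data)
{
	int err = fake_fw_read(name, data, sizeof(*data));

	if (err == 0)
		*data = le32toh(*data);
	return err;
}

/* "query": seed the buffer with the input value first, then read. */
static int int_query(const char *name, uint32_t *data)
{
	*data = htole32(*data);
	return int_get(name, data);
}

int main(void)
{
	uint32_t band = 5;	/* e.g. a band selector for "bw_cap" */

	int_query("bw_cap", &band);
	printf("reply: %u\n", band);	/* prints 42 */
	return 0;
}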
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 36af81975855..0949e7975ff1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -1673,7 +1673,6 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
struct sk_buff_head reorder_list;
struct sk_buff *pnext;
u8 flags;
- u32 buf_size;
reorder_data = ((struct brcmf_skb_reorder_data *)pkt->cb)->reorder;
flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
@@ -1708,15 +1707,13 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
}
/* from here on we need a flow reorder instance */
if (rfi == NULL) {
- buf_size = sizeof(*rfi);
max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
- buf_size += (max_idx + 1) * sizeof(pkt);
-
/* allocate space for flow reorder info */
brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
flow_id, max_idx);
- rfi = kzalloc(buf_size, GFP_ATOMIC);
+ rfi = kzalloc(struct_size(rfi, pktslots, max_idx + 1),
+ GFP_ATOMIC);
if (rfi == NULL) {
bphy_err(drvr, "failed to alloc buffer\n");
brcmf_netif_rx(ifp, pkt);
@@ -1724,7 +1721,6 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
}
ifp->drvr->reorder_flows[flow_id] = rfi;
- rfi->pktslots = (struct sk_buff **)(rfi + 1);
rfi->max_idx = max_idx;
}
if (flags & BRCMF_RXREORDER_NEW_HOLE) {
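This hunk pairs with the core.h change above: @pktslots becomes a
trailing flexible array, so one allocation covers the struct and its
slot pointers and the old manual pointer fixup disappears.
struct_size() (from <linux/overflow.h>) computes the combined size
with overflow checking. A standalone sketch of the pattern, with the
sizing open-coded since it builds outside the kernel:

#include <stdio.h>
#include <stdlib.h>

struct reorder_info {
	unsigned char flow_id;
	unsigned char max_idx;
	void *pktslots[];	/* slots live in the same allocation */
};

int main(void)
{
	unsigned char max_idx = 63;
	/* equivalent of kzalloc(struct_size(rfi, pktslots, max_idx + 1)) */
	struct reorder_info *rfi =
		calloc(1, sizeof(*rfi) +
			  (max_idx + 1u) * sizeof(rfi->pktslots[0]));

	if (!rfi)
		return 1;
	rfi->max_idx = max_idx;
	/* no rfi->pktslots = (void *)(rfi + 1) fixup needed any more */
	printf("allocated %u slots\n", rfi->max_idx + 1u);
	free(rfi);
	return 0;
}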
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index ce482a3877e9..5dee54819fbd 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -16,7 +16,7 @@
#include <linux/kthread.h>
#include <linux/io.h>
#include <linux/random.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <soc.h>
#include <chipcommon.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 1461dc453ac2..7b936668c1b6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -23,7 +23,7 @@
#include <linux/bcma/bcma.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <defs.h>
#include <brcmu_wifi.h>
#include <brcmu_utils.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c
index 2f8908074303..08841b9a5b81 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c
@@ -3,7 +3,7 @@
* Copyright (c) 2019 Broadcom
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/math.h>
#include <linux/string.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
index 33d17b779201..a767cbb79185 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
@@ -351,9 +351,7 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
{
struct ampdu_info *ampdu = wlc->ampdu;
u32 phy_rate = mcs_2_rate(FFPLD_MAX_MCS, true, false);
- u32 txunfl_ratio;
u8 max_mpdu;
- u32 current_ampdu_cnt = 0;
u16 max_pld_size;
u32 new_txunfl;
struct brcms_fifo_info *fifo = (ampdu->fifo_tb + fid);
@@ -389,26 +387,8 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
if (fifo->accum_txfunfl < 10)
return 0;
- brcms_dbg_ht(wlc->hw->d11core, "ampdu_count %d tx_underflows %d\n",
- current_ampdu_cnt, fifo->accum_txfunfl);
+ brcms_dbg_ht(wlc->hw->d11core, "tx_underflows %d\n", fifo->accum_txfunfl);
- /*
- compute the current ratio of tx unfl per ampdu.
- When the current ampdu count becomes too
- big while the ratio remains small, we reset
- the current count in order to not
- introduce too big of a latency in detecting a
- large amount of tx underflows later.
- */
-
- txunfl_ratio = current_ampdu_cnt / fifo->accum_txfunfl;
-
- if (txunfl_ratio > ampdu->tx_max_funl) {
- if (current_ampdu_cnt >= FFPLD_MAX_AMPDU_CNT)
- fifo->accum_txfunfl = 0;
-
- return 0;
- }
max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS],
AMPDU_NUM_MPDU_LEGACY);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index d86f28b8bc60..5b1d35601bbd 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -1611,10 +1611,9 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
if (le32_to_cpu(hdr->idx) == idx) {
pdata = wl->fw.fw_bin[i]->data +
le32_to_cpu(hdr->offset);
- *pbuf = kvmalloc(len, GFP_KERNEL);
+ *pbuf = kvmemdup(pdata, len, GFP_KERNEL);
if (*pbuf == NULL)
- goto fail;
- memcpy(*pbuf, pdata, len);
+ return -ENOMEM;
return 0;
}
}
@@ -1622,7 +1621,6 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
brcms_err(wl->wlc->hw->d11core,
"ERROR: ucode buf tag:%d can not be found!\n", idx);
*pbuf = NULL;
-fail:
return -ENODATA;
}
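kvmemdup() allocates and copies in one call and returns NULL on
failure, which is what lets the fail label go away above. A userspace
analogue of the contract; memdup() here is illustrative, not the
kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for kvmemdup(): freshly allocated copy, or NULL. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const unsigned char fw[] = { 1, 2, 3, 4 };
	unsigned char *buf = memdup(fw, sizeof(fw));

	if (!buf)
		return 1;	/* replaces the old goto fail path */
	printf("%u\n", buf[2]);	/* prints 3 */
	free(buf);
	return 0;
}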
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h
index 9065ca5b0208..bad080d33c07 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw.h
+++ b/drivers/net/wireless/intel/ipw2x00/libipw.h
@@ -345,14 +345,19 @@ struct libipw_hdr_2addr {
} __packed;
struct libipw_hdr_3addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(libipw_hdr_3addr_hdr, hdr, __packed,
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctl;
+ );
u8 payload[];
} __packed;
+static_assert(offsetof(struct libipw_hdr_3addr, payload) == sizeof(struct libipw_hdr_3addr_hdr),
+ "struct member likely outside of __struct_group()");
struct libipw_hdr_4addr {
__le16 frame_ctl;
@@ -400,7 +405,7 @@ struct libipw_info_element {
*/
struct libipw_auth {
- struct libipw_hdr_3addr header;
+ struct libipw_hdr_3addr_hdr header;
__le16 algorithm;
__le16 transaction;
__le16 status;
@@ -417,7 +422,7 @@ struct libipw_channel_switch {
} __packed;
struct libipw_action {
- struct libipw_hdr_3addr header;
+ struct libipw_hdr_3addr_hdr header;
u8 category;
u8 action;
union {
@@ -430,7 +435,7 @@ struct libipw_action {
} __packed;
struct libipw_disassoc {
- struct libipw_hdr_3addr header;
+ struct libipw_hdr_3addr_hdr header;
__le16 reason;
} __packed;
@@ -438,13 +443,13 @@ struct libipw_disassoc {
#define libipw_deauth libipw_disassoc
struct libipw_probe_request {
- struct libipw_hdr_3addr header;
+ struct libipw_hdr_3addr_hdr header;
/* SSID, supported rates */
u8 variable[];
} __packed;
struct libipw_probe_response {
- struct libipw_hdr_3addr header;
+ struct libipw_hdr_3addr_hdr header;
__le32 time_stamp[2];
__le16 beacon_interval;
__le16 capability;
@@ -456,16 +461,8 @@ struct libipw_probe_response {
/* Alias beacon for probe_response */
#define libipw_beacon libipw_probe_response
-struct libipw_assoc_request {
- struct libipw_hdr_3addr header;
- __le16 capability;
- __le16 listen_interval;
- /* SSID, supported rates, RSN */
- u8 variable[];
-} __packed;
-
struct libipw_reassoc_request {
- struct libipw_hdr_3addr header;
+ struct libipw_hdr_3addr_hdr header;
__le16 capability;
__le16 listen_interval;
u8 current_ap[ETH_ALEN];
@@ -473,7 +470,7 @@ struct libipw_reassoc_request {
} __packed;
struct libipw_assoc_response {
- struct libipw_hdr_3addr header;
+ struct libipw_hdr_3addr_hdr header;
__le16 capability;
__le16 status;
__le16 aid;
@@ -588,13 +585,6 @@ struct libipw_channel_map {
u8 map;
} __packed;
-struct libipw_ibss_dfs {
- struct libipw_info_element ie;
- u8 owner[ETH_ALEN];
- u8 recovery_interval;
- struct libipw_channel_map channel_map[];
-};
-
struct libipw_csa {
u8 mode;
u8 channel;
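__struct_group() (from <linux/stddef.h>) is what makes the
header/payload split above work: it emits a union of an anonymous
struct and an identically laid-out tagged struct, so the fixed members
stay addressable by name while the header is also available as a
standalone type (libipw_hdr_3addr_hdr) for embedding in the management
frame structs that follow. A hand-expanded sketch under userspace
assumptions (attributes trimmed, u8/__le16 mapped to stdint types):

#include <stddef.h>
#include <stdint.h>

struct hdr_3addr_hdr {	/* the embeddable "header only" view */
	uint16_t frame_ctl;
	uint16_t duration_id;
	uint8_t addr1[6], addr2[6], addr3[6];
	uint16_t seq_ctl;
} __attribute__((packed));

struct hdr_3addr {
	union {
		struct hdr_3addr_hdr hdr;	/* grouped view */
		struct {			/* by-name view */
			uint16_t frame_ctl;
			uint16_t duration_id;
			uint8_t addr1[6], addr2[6], addr3[6];
			uint16_t seq_ctl;
		} __attribute__((packed));
	};
	uint8_t payload[];
} __attribute__((packed));

/* same layout check the patch adds with static_assert() */
_Static_assert(offsetof(struct hdr_3addr, payload) ==
	       sizeof(struct hdr_3addr_hdr),
	       "struct member likely outside of __struct_group()");

int main(void) { return 0; }	/* compile-time checks only */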
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
index 903de34028ef..dbc7153d0a3d 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
@@ -509,7 +509,7 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
int i, idx, ret = 0;
int group_key = 0;
const char *alg, *module;
- struct lib80211_crypto_ops *ops;
+ const struct lib80211_crypto_ops *ops;
struct lib80211_crypt_data **crypt;
struct libipw_security sec = {
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index 1fab7849f56d..14d2331ee6cb 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -20,7 +20,7 @@
#include <linux/netdevice.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/mac80211.h>
#include "common.h"
@@ -527,7 +527,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
struct ieee80211_hdr *header;
struct ieee80211_rx_status rx_status = {};
struct il_rx_pkt *pkt = rxb_addr(rxb);
- struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
+ struct il3945_rx_frame_stats_hdr *rx_stats = IL_RX_STATS(pkt);
struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.h b/drivers/net/wireless/intel/iwlegacy/3945.h
index 82e4a4878bc2..ffbe11902628 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.h
+++ b/drivers/net/wireless/intel/iwlegacy/3945.h
@@ -157,8 +157,10 @@ struct il3945_ibss_seq {
};
#define IL_RX_HDR(x) ((struct il3945_rx_frame_hdr *)(\
- x->u.rx_frame.stats.payload + \
- x->u.rx_frame.stats.phy_count))
+ container_of(&x->u.rx_frame.stats, \
+ struct il3945_rx_frame_stats, \
+ hdr)->payload + \
+ x->u.rx_frame.stats.phy_count))
#define IL_RX_END(x) ((struct il3945_rx_frame_end *)(\
IL_RX_HDR(x)->payload + \
le16_to_cpu(IL_RX_HDR(x)->len)))
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 1600c344edbb..fcccde7bb659 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -1769,7 +1769,7 @@ il4965_tx_skb(struct il_priv *il,
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_cmd = txq->cmd[q->write_ptr];
out_meta = &txq->meta[q->write_ptr];
- tx_cmd = &out_cmd->cmd.tx;
+ tx_cmd = container_of(&out_cmd->cmd.tx, struct il_tx_cmd, __hdr);
memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
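With struct il_tx_cmd now ending in a flexible hdr[] array, struct
il_device_cmd (reworked later in this diff) can only embed the fixed
il_tx_cmd_hdr part, so the code above recovers the flexible-array view
from the embedded header with container_of(). A reduced sketch of the
idiom; types and names are illustrative:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tx_cmd_hdr { unsigned short len; };

struct tx_cmd {
	struct tx_cmd_hdr __hdr;	/* grouped fixed part */
	unsigned char payload[];	/* flexible tail */
};

int main(void)
{
	static struct tx_cmd cmd;
	struct tx_cmd_hdr *h = &cmd.__hdr;
	struct tx_cmd *full = container_of(h, struct tx_cmd, __hdr);

	printf("%d\n", full == &cmd);	/* prints 1 */
	return 0;
}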
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index c34729f576cd..b63e29590b04 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -20,7 +20,7 @@
#include <linux/units.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "common.h"
#include "4965.h"
diff --git a/drivers/net/wireless/intel/iwlegacy/commands.h b/drivers/net/wireless/intel/iwlegacy/commands.h
index 28cf4e832152..4a9fa8b83f0f 100644
--- a/drivers/net/wireless/intel/iwlegacy/commands.h
+++ b/drivers/net/wireless/intel/iwlegacy/commands.h
@@ -201,9 +201,6 @@ struct il_cmd_header {
* 15 unsolicited RX or uCode-originated notification
*/
__le16 sequence;
-
- /* command or response/notification data follows immediately */
- u8 data[];
} __packed;
/**
@@ -1160,23 +1157,33 @@ struct il_wep_cmd {
#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
struct il3945_rx_frame_stats {
- u8 phy_count;
- u8 id;
- u8 rssi;
- u8 agc;
- __le16 sig_avg;
- __le16 noise_diff;
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(il3945_rx_frame_stats_hdr, hdr, __packed,
+ u8 phy_count;
+ u8 id;
+ u8 rssi;
+ u8 agc;
+ __le16 sig_avg;
+ __le16 noise_diff;
+ );
u8 payload[];
} __packed;
+static_assert(offsetof(struct il3945_rx_frame_stats, payload) == sizeof(struct il3945_rx_frame_stats_hdr),
+ "struct member likely outside of __struct_group()");
struct il3945_rx_frame_hdr {
- __le16 channel;
- __le16 phy_flags;
- u8 reserved1;
- u8 rate;
- __le16 len;
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(il3945_rx_frame_hdr_hdr, hdr, __packed,
+ __le16 channel;
+ __le16 phy_flags;
+ u8 reserved1;
+ u8 rate;
+ __le16 len;
+ );
u8 payload[];
} __packed;
+static_assert(offsetof(struct il3945_rx_frame_hdr, payload) == sizeof(struct il3945_rx_frame_hdr_hdr),
+ "struct member likely outside of __struct_group()");
struct il3945_rx_frame_end {
__le32 status;
@@ -1193,8 +1200,8 @@ struct il3945_rx_frame_end {
* stats.phy_count
*/
struct il3945_rx_frame {
- struct il3945_rx_frame_stats stats;
- struct il3945_rx_frame_hdr hdr;
+ struct il3945_rx_frame_stats_hdr stats;
+ struct il3945_rx_frame_hdr_hdr hdr;
struct il3945_rx_frame_end end;
} __packed;
@@ -1352,67 +1359,69 @@ struct il_rx_mpdu_res_start {
*/
struct il3945_tx_cmd {
- /*
- * MPDU byte count:
- * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
- * + 8 byte IV for CCM or TKIP (not used for WEP)
- * + Data payload
- * + 8-byte MIC (not used for CCM/WEP)
- * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
- * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i
- * Range: 14-2342 bytes.
- */
- __le16 len;
-
- /*
- * MPDU or MSDU byte count for next frame.
- * Used for fragmentation and bursting, but not 11n aggregation.
- * Same as "len", but for next frame. Set to 0 if not applicable.
- */
- __le16 next_frame_len;
-
- __le32 tx_flags; /* TX_CMD_FLG_* */
-
- u8 rate;
-
- /* Index of recipient station in uCode's station table */
- u8 sta_id;
- u8 tid_tspec;
- u8 sec_ctl;
- u8 key[16];
- union {
- u8 byte[8];
- __le16 word[4];
- __le32 dw[2];
- } tkip_mic;
- __le32 next_frame_info;
- union {
- __le32 life_time;
- __le32 attempt;
- } stop_time;
- u8 supp_rates[2];
- u8 rts_retry_limit; /*byte 50 */
- u8 data_retry_limit; /*byte 51 */
- union {
- __le16 pm_frame_timeout;
- __le16 attempt_duration;
- } timeout;
-
- /*
- * Duration of EDCA burst Tx Opportunity, in 32-usec units.
- * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
- */
- __le16 driver_txop;
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(il3945_tx_cmd_hdr, __hdr, __packed,
+ /*
+ * MPDU byte count:
+ * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
+ * + 8 byte IV for CCM or TKIP (not used for WEP)
+ * + Data payload
+ * + 8-byte MIC (not used for CCM/WEP)
+ * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
+ * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+ * Range: 14-2342 bytes.
+ */
+ __le16 len;
+
+ /*
+ * MPDU or MSDU byte count for next frame.
+ * Used for fragmentation and bursting, but not 11n aggregation.
+ * Same as "len", but for next frame. Set to 0 if not applicable.
+ */
+ __le16 next_frame_len;
+
+ __le32 tx_flags; /* TX_CMD_FLG_* */
+
+ u8 rate;
+
+ /* Index of recipient station in uCode's station table */
+ u8 sta_id;
+ u8 tid_tspec;
+ u8 sec_ctl;
+ u8 key[16];
+ union {
+ u8 byte[8];
+ __le16 word[4];
+ __le32 dw[2];
+ } tkip_mic;
+ __le32 next_frame_info;
+ union {
+ __le32 life_time;
+ __le32 attempt;
+ } stop_time;
+ u8 supp_rates[2];
+ u8 rts_retry_limit; /*byte 50 */
+ u8 data_retry_limit; /*byte 51 */
+ union {
+ __le16 pm_frame_timeout;
+ __le16 attempt_duration;
+ } timeout;
+
+ /*
+ * Duration of EDCA burst Tx Opportunity, in 32-usec units.
+ * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
+ */
+ __le16 driver_txop;
+ );
/*
* MAC header goes here, followed by 2 bytes padding if MAC header
* length is 26 or 30 bytes, followed by payload data
*/
- union {
- DECLARE_FLEX_ARRAY(u8, payload);
- DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr);
- };
+ struct ieee80211_hdr hdr[];
} __packed;
+static_assert(offsetof(struct il3945_tx_cmd, hdr) == sizeof(struct il3945_tx_cmd_hdr),
+ "struct member likely outside of __struct_group()");
/*
* C_TX = 0x1c (response)
@@ -1438,83 +1447,87 @@ struct il_dram_scratch {
} __packed;
struct il_tx_cmd {
- /*
- * MPDU byte count:
- * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
- * + 8 byte IV for CCM or TKIP (not used for WEP)
- * + Data payload
- * + 8-byte MIC (not used for CCM/WEP)
- * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
- * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i
- * Range: 14-2342 bytes.
- */
- __le16 len;
-
- /*
- * MPDU or MSDU byte count for next frame.
- * Used for fragmentation and bursting, but not 11n aggregation.
- * Same as "len", but for next frame. Set to 0 if not applicable.
- */
- __le16 next_frame_len;
-
- __le32 tx_flags; /* TX_CMD_FLG_* */
-
- /* uCode may modify this field of the Tx command (in host DRAM!).
- * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
- struct il_dram_scratch scratch;
-
- /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
- __le32 rate_n_flags; /* RATE_MCS_* */
-
- /* Index of destination station in uCode's station table */
- u8 sta_id;
-
- /* Type of security encryption: CCM or TKIP */
- u8 sec_ctl; /* TX_CMD_SEC_* */
-
- /*
- * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial
- * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
- * data frames, this field may be used to selectively reduce initial
- * rate (via non-0 value) for special frames (e.g. management), while
- * still supporting rate scaling for all frames.
- */
- u8 initial_rate_idx;
- u8 reserved;
- u8 key[16];
- __le16 next_frame_flags;
- __le16 reserved2;
- union {
- __le32 life_time;
- __le32 attempt;
- } stop_time;
-
- /* Host DRAM physical address pointer to "scratch" in this command.
- * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
- __le32 dram_lsb_ptr;
- u8 dram_msb_ptr;
-
- u8 rts_retry_limit; /*byte 50 */
- u8 data_retry_limit; /*byte 51 */
- u8 tid_tspec;
- union {
- __le16 pm_frame_timeout;
- __le16 attempt_duration;
- } timeout;
-
- /*
- * Duration of EDCA burst Tx Opportunity, in 32-usec units.
- * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
- */
- __le16 driver_txop;
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(il_tx_cmd_hdr, __hdr, __packed,
+ /*
+ * MPDU byte count:
+ * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
+ * + 8 byte IV for CCM or TKIP (not used for WEP)
+ * + Data payload
+ * + 8-byte MIC (not used for CCM/WEP)
+ * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
+ * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+ * Range: 14-2342 bytes.
+ */
+ __le16 len;
+
+ /*
+ * MPDU or MSDU byte count for next frame.
+ * Used for fragmentation and bursting, but not 11n aggregation.
+ * Same as "len", but for next frame. Set to 0 if not applicable.
+ */
+ __le16 next_frame_len;
+
+ __le32 tx_flags; /* TX_CMD_FLG_* */
+
+ /* uCode may modify this field of the Tx command (in host DRAM!).
+ * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
+ struct il_dram_scratch scratch;
+
+ /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
+ __le32 rate_n_flags; /* RATE_MCS_* */
+
+ /* Index of destination station in uCode's station table */
+ u8 sta_id;
+
+ /* Type of security encryption: CCM or TKIP */
+ u8 sec_ctl; /* TX_CMD_SEC_* */
+
+ /*
+ * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial
+ * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
+ * data frames, this field may be used to selectively reduce initial
+ * rate (via non-0 value) for special frames (e.g. management), while
+ * still supporting rate scaling for all frames.
+ */
+ u8 initial_rate_idx;
+ u8 reserved;
+ u8 key[16];
+ __le16 next_frame_flags;
+ __le16 reserved2;
+ union {
+ __le32 life_time;
+ __le32 attempt;
+ } stop_time;
+
+ /* Host DRAM physical address pointer to "scratch" in this command.
+ * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
+ __le32 dram_lsb_ptr;
+ u8 dram_msb_ptr;
+
+ u8 rts_retry_limit; /*byte 50 */
+ u8 data_retry_limit; /*byte 51 */
+ u8 tid_tspec;
+ union {
+ __le16 pm_frame_timeout;
+ __le16 attempt_duration;
+ } timeout;
+
+ /*
+ * Duration of EDCA burst Tx Opportunity, in 32-usec units.
+ * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
+ */
+ __le16 driver_txop;
+ );
/*
* MAC header goes here, followed by 2 bytes padding if MAC header
* length is 26 or 30 bytes, followed by payload data
*/
- u8 payload[0];
struct ieee80211_hdr hdr[];
} __packed;
+static_assert(offsetof(struct il_tx_cmd, hdr) == sizeof(struct il_tx_cmd_hdr),
+ "struct member likely outside of __struct_group()");
/* TX command response is sent after *3945* transmission attempts.
*
@@ -2502,7 +2515,7 @@ struct il3945_scan_cmd {
/* For active scans (set to all-0s for passive scans).
* Does not include payload. Must specify Tx rate; no rate scaling. */
- struct il3945_tx_cmd tx_cmd;
+ struct il3945_tx_cmd_hdr tx_cmd;
/* For directed active scans (set to all-0s otherwise) */
struct il_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
@@ -2546,7 +2559,7 @@ struct il_scan_cmd {
/* For active scans (set to all-0s for passive scans).
* Does not include payload. Must specify Tx rate; no rate scaling. */
- struct il_tx_cmd tx_cmd;
+ struct il_tx_cmd_hdr tx_cmd;
/* For directed active scans (set to all-0s otherwise) */
struct il_ssid_ie direct_scan[PROBE_OPTION_MAX];
@@ -2662,7 +2675,7 @@ struct il4965_beacon_notif {
*/
struct il3945_tx_beacon_cmd {
- struct il3945_tx_cmd tx;
+ struct il3945_tx_cmd_hdr tx;
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
@@ -2670,7 +2683,7 @@ struct il3945_tx_beacon_cmd {
} __packed;
struct il_tx_beacon_cmd {
- struct il_tx_cmd tx;
+ struct il_tx_cmd_hdr tx;
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index 69687fcf963f..2147781b5fff 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -553,7 +553,7 @@ struct il_device_cmd {
u8 val8;
u16 val16;
u32 val32;
- struct il_tx_cmd tx;
+ struct il_tx_cmd_hdr tx;
u8 payload[DEF_CMD_PAYLOAD_SIZE];
} __packed cmd;
} __packed;
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index 3b6b8b410be5..fa1be8c54d3c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 92
+#define IWL_BZ_UCODE_API_MAX 93
/* Lowest firmware API version supported */
#define IWL_BZ_UCODE_API_MIN 90
@@ -148,6 +148,17 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
+const struct iwl_cfg_trans_params iwl_gl_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_BZ,
+ .base_params = &iwl_bz_base_params,
+ .mq_rx_supported = true,
+ .rf_id = true,
+ .gen2 = true,
+ .umac_prph_offset = 0x300000,
+ .xtal_latency = 12000,
+ .low_latency_xtal = true,
+};
+
const char iwl_bz_name[] = "Intel(R) TBD Bz device";
const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 4ccb0b7bdc20..f1dd1c29f305 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 92
+#define IWL_SC_UCODE_API_MAX 93
/* Lowest firmware API version supported */
#define IWL_SC_UCODE_API_MIN 90
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
index 71f67a019cf6..5ca85d90a8d6 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
@@ -13,7 +13,7 @@
#include <linux/netdevice.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "iwl-io.h"
#include "iwl-trans.h"
#include "iwl-modparams.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 65b7c68e5ca7..e0b14be25b02 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1325,8 +1325,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
iwlwifi_mod_params.amsdu_size);
}
- trans_cfg.cmd_q_wdg_timeout = IWL_WATCHDOG_DISABLED;
-
trans_cfg.command_groups = iwl_dvm_groups;
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_dvm_groups);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index e9d2717362cf..7f67e602940c 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -13,7 +13,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/mac80211.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "iwl-trans.h"
#include "iwl-io.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 79774c8c7ff4..a7cea0a55b35 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -357,6 +357,11 @@ int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
}
mcc_val = wifi_pkg->package.elements[1].integer.value;
+ if (mcc_val != BIOS_MCC_CHINA) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "ACPI WRDD is supported only for CN\n");
+ goto out_free;
+ }
mcc[0] = (mcc_val >> 8) & 0xff;
mcc[1] = mcc_val & 0xff;
@@ -725,22 +730,25 @@ int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
entry = &wifi_pkg->package.elements[entry_idx];
entry_idx++;
if (entry->type != ACPI_TYPE_INTEGER ||
- entry->integer.value > num_profiles) {
+ entry->integer.value > num_profiles ||
+ entry->integer.value <
+ rev_data[idx].min_profiles) {
ret = -EINVAL;
goto out_free;
}
- num_profiles = entry->integer.value;
/*
- * this also validates >= min_profiles since we
- * otherwise wouldn't have gotten the data when
- * looking up in ACPI
+ * Check that the received package count matches the
+ * maximum number of profiles
*/
if (wifi_pkg->package.count !=
hdr_size + profile_size * num_profiles) {
ret = -EINVAL;
goto out_free;
}
+
+ /* Number of valid profiles */
+ num_profiles = entry->integer.value;
}
goto read_table;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
index b97a43353779..ddc84430d895 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
@@ -95,7 +95,7 @@ enum iwl_bt_ci_compliance {
}; /* BT_COEX_CI_COMPLIENCE_E_VER_1 */
/**
- * struct iwl_bt_coex_profile_notif - notification about BT coex
+ * struct iwl_bt_coex_prof_old_notif - notification about BT coex
* @mbox_msg: message from BT to WiFi
* @msg_idx: the index of the message
* @bt_ci_compliance: enum %iwl_bt_ci_compliance
@@ -110,7 +110,7 @@ enum iwl_bt_ci_compliance {
* @wifi_loss_mid_high_rssi: The predicted lost WiFi rate (% of air time that
* BT is utilizing) when the RSSI is mid/high (>= -65 dBm)
*/
-struct iwl_bt_coex_profile_notif {
+struct iwl_bt_coex_prof_old_notif {
__le32 mbox_msg[4];
__le32 msg_idx;
__le32 bt_ci_compliance;
@@ -126,4 +126,29 @@ struct iwl_bt_coex_profile_notif {
* BT_COEX_PROFILE_NTFY_API_S_VER_5
*/
+/**
+ * enum iwl_bt_coex_subcmd_ids - coex configuration command IDs
+ */
+enum iwl_bt_coex_subcmd_ids {
+ /**
+ * @PROFILE_NOTIF: &struct iwl_bt_coex_profile_notif
+ */
+ PROFILE_NOTIF = 0xFF,
+};
+
+#define COEX_NUM_BAND 3
+#define COEX_NUM_CHAINS 2
+
+/**
+ * struct iwl_bt_coex_profile_notif - notification about BT coex
+ * @wifi_loss_low_rssi: The predicted lost WiFi rate (% of air time that BT is
+ * utilizing) when the RSSI is low (<= -65 dBm)
+ * @wifi_loss_mid_high_rssi: The predicted lost WiFi rate (% of air time that
+ * BT is utilizing) when the RSSI is mid/high (>= -65 dBm)
+ */
+struct iwl_bt_coex_profile_notif {
+ u8 wifi_loss_low_rssi[COEX_NUM_BAND][COEX_NUM_CHAINS];
+ u8 wifi_loss_mid_high_rssi[COEX_NUM_BAND][COEX_NUM_CHAINS];
+} __packed; /* BT_COEX_BT_PROFILE_NTF_API_S_VER_1 */
+
#endif /* __iwl_fw_api_coex_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 7544c4cb1a30..2f40e69db318 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2022, 2024 Intel Corporation
*/
#ifndef __iwl_fw_api_commands_h__
#define __iwl_fw_api_commands_h__
@@ -25,6 +25,8 @@
* @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids
* @LOCATION_GROUP: location group, uses command IDs from
* &enum iwl_location_subcmd_ids
+ * @BT_COEX_GROUP: bt coex group, uses command IDs from
+ * &enum iwl_bt_coex_subcmd_ids
* @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from
* &enum iwl_prot_offload_subcmd_ids
* @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from
@@ -43,6 +45,7 @@ enum iwl_mvm_command_groups {
SCAN_GROUP = 0x6,
NAN_GROUP = 0x7,
LOCATION_GROUP = 0x8,
+ BT_COEX_GROUP = 0x9,
PROT_OFFLOAD_GROUP = 0xb,
REGULATORY_AND_NVM_GROUP = 0xc,
DEBUG_GROUP = 0xf,
@@ -144,8 +147,8 @@ enum iwl_legacy_cmds {
/**
* @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or
* &struct iwl_tx_cmd_gen3,
- * response in &struct iwl_mvm_tx_resp or
- * &struct iwl_mvm_tx_resp_v3
+ * response in &struct iwl_tx_resp or
+ * &struct iwl_tx_resp_v3
*/
TX_CMD = 0x1c,
@@ -398,7 +401,7 @@ enum iwl_legacy_cmds {
REDUCE_TX_POWER_CMD = 0x9f,
/**
- * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif
+ * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif_v4
*/
MISSED_BEACONS_NOTIFICATION = 0xa2,
@@ -467,7 +470,7 @@ enum iwl_legacy_cmds {
MARKER_CMD = 0xcb,
/**
- * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif
+ * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_prof_old_notif
*/
BT_PROFILE_NOTIFICATION = 0xce,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 855cd13a181e..550de6db1834 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -476,6 +476,8 @@ enum iwl_fw_ini_region_device_memory_subtype {
* @IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_START: start handling override preset
* request
* @IWL_FW_INI_TIME_SCAN_FAILURE: failed scan channel list
+ * @IWL_FW_INI_TIME_ESR_LINK_UP: EMLSR is active (several links are activated)
+ * @IWL_FW_INI_TIME_ESR_LINK_DOWN: EMLSR is inactive (only one active link left)
* @IWL_FW_INI_TIME_POINT_NUM: number of time points
*/
enum iwl_fw_ini_time_point {
@@ -509,6 +511,8 @@ enum iwl_fw_ini_time_point {
IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_EXT_REQ,
IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_START,
IWL_FW_INI_TIME_SCAN_FAILURE,
+ IWL_FW_INI_TIME_ESR_LINK_UP,
+ IWL_FW_INI_TIME_ESR_LINK_DOWN,
IWL_FW_INI_TIME_POINT_NUM,
}; /* FW_TLV_DEBUG_TIME_POINT_API_E */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index ca6fa66d1917..c46e24fc6a1e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -42,7 +42,7 @@ enum iwl_mac_conf_subcmd_ids {
*/
LINK_CONFIG_CMD = 0x9,
/**
- * @STA_CONFIG_CMD: &struct iwl_mvm_sta_cfg_cmd
+ * @STA_CONFIG_CMD: &struct iwl_sta_cfg_cmd
*/
STA_CONFIG_CMD = 0xA,
/**
@@ -50,7 +50,7 @@ enum iwl_mac_conf_subcmd_ids {
*/
AUX_STA_CMD = 0xB,
/**
- * @STA_REMOVE_CMD: &struct iwl_mvm_remove_sta_cmd
+ * @STA_REMOVE_CMD: &struct iwl_remove_sta_cmd
*/
STA_REMOVE_CMD = 0xC,
/**
@@ -62,6 +62,14 @@ enum iwl_mac_conf_subcmd_ids {
*/
ROC_CMD = 0xE,
/**
+ * @MISSED_BEACONS_NOTIF: &struct iwl_missed_beacons_notif
+ */
+ MISSED_BEACONS_NOTIF = 0xF6,
+ /**
+ * @EMLSR_TRANS_FAIL_NOTIF: &struct iwl_esr_trans_fail_notif
+ */
+ EMLSR_TRANS_FAIL_NOTIF = 0xF7,
+ /**
* @ROC_NOTIF: &struct iwl_roc_notif
*/
ROC_NOTIF = 0xF8,
@@ -446,6 +454,9 @@ enum iwl_link_ctx_flags {
* @listen_lmac: indicates whether the link should be allocated on the Listen
* Lmac or on the Main Lmac. Cannot be changed on an active Link.
* Relevant only for eSR.
+ * @block_tx: tell the firmware that this link can't Tx. This should be used
+ * only when a link is de-activated because of CSA with mode = 1.
+ * Available since version 5.
* @reserved1: in version 2, listen_lmac became reserved
* @cck_rates: basic rates available for CCK
* @ofdm_rates: basic rates available for OFDM
@@ -472,7 +483,9 @@ enum iwl_link_ctx_flags {
* @bssid_index: index of the associated VAP
* @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
* @spec_link_id: link_id as the AP knows it
- * @reserved2: alignment
+ * @ul_mu_data_disable: OM Control UL MU Data Disable RX Support (bit 44) in
+ * HE MAC Capabilities information field as defined in figure 9-897 in
+ * IEEE802.11REVme-D5.0
* @ibss_bssid_addr: bssid for ibss
* @reserved_for_ibss_bssid_addr: reserved
* @reserved3: reserved for future use
@@ -488,7 +501,10 @@ struct iwl_link_config_cmd {
__le32 active;
union {
__le32 listen_lmac;
- __le32 reserved1;
+ struct {
+ u8 block_tx;
+ u8 reserved1[3];
+ };
};
__le32 cck_rates;
__le32 ofdm_rates;
@@ -515,17 +531,17 @@ struct iwl_link_config_cmd {
u8 bssid_index;
u8 bss_color;
u8 spec_link_id;
- u8 reserved2;
+ u8 ul_mu_data_disable;
u8 ibss_bssid_addr[6];
__le16 reserved_for_ibss_bssid_addr;
__le32 reserved3[8];
-} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3 */
+} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3, _VER_4, _VER_5 */
/* Currently FW supports link ids in the range 0-3 and can have
* at most two active links for each vif.
*/
-#define IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM 2
-#define IWL_MVM_FW_MAX_LINK_ID 3
+#define IWL_FW_MAX_ACTIVE_LINKS_NUM 2
+#define IWL_FW_MAX_LINK_ID 3
/**
* enum iwl_fw_sta_type - FW station types
@@ -547,7 +563,7 @@ enum iwl_fw_sta_type {
}; /* STATION_TYPE_E_VER_1 */
/**
- * struct iwl_mvm_sta_cfg_cmd - cmd structure to add a peer sta to the uCode's
+ * struct iwl_sta_cfg_cmd - cmd structure to add a peer sta to the uCode's
* station table
* ( STA_CONFIG_CMD = 0xA )
*
@@ -579,7 +595,7 @@ enum iwl_fw_sta_type {
* capa
* @htc_flags: which features are supported in HTC
*/
-struct iwl_mvm_sta_cfg_cmd {
+struct iwl_sta_cfg_cmd {
__le32 sta_id;
__le32 link_id;
u8 peer_mld_address[ETH_ALEN];
@@ -620,13 +636,13 @@ struct iwl_mvm_aux_sta_cmd {
} __packed; /* AUX_STA_CMD_API_S_VER_1 */
/**
- * struct iwl_mvm_remove_sta_cmd - a cmd structure to remove a sta added by
+ * struct iwl_remove_sta_cmd - a cmd structure to remove a sta added by
* STA_CONFIG_CMD or AUX_STA_CONFIG_CMD
* ( STA_REMOVE_CMD = 0xC )
*
* @sta_id: index of station to remove
*/
-struct iwl_mvm_remove_sta_cmd {
+struct iwl_remove_sta_cmd {
__le32 sta_id;
} __packed; /* REMOVE_STA_API_S_VER_1 */
@@ -663,4 +679,51 @@ struct iwl_mvm_esr_mode_notif {
__le32 action;
} __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_1 */
+/**
+ * struct iwl_missed_beacons_notif - sent by the firmware upon beacon loss
+ * ( MISSED_BEACONS_NOTIF = 0xF6 )
+ * @link_id: fw link ID
+ * @consec_missed_beacons_since_last_rx: number of consecutive missed
+ * beacons since last RX.
+ * @consec_missed_beacons: number of consecutive missed beacons
+ * @other_link_id: used in EMLSR only. The fw link ID for
+ * &consec_missed_beacons_other_link. IWL_MVM_FW_LINK_ID_INVALID (0xff) if
+ * invalid.
+ * @consec_missed_beacons_other_link: number of consecutive missed beacons on
+ * &other_link_id.
+ */
+struct iwl_missed_beacons_notif {
+ __le32 link_id;
+ __le32 consec_missed_beacons_since_last_rx;
+ __le32 consec_missed_beacons;
+ __le32 other_link_id;
+ __le32 consec_missed_beacons_other_link;
+} __packed; /* MISSED_BEACON_NTFY_API_S_VER_5 */
+
+/*
+ * enum iwl_esr_trans_fail_code: to be used to parse the notif below
+ *
+ * @ESR_TRANS_FAILED_TX_STATUS_ERROR: failed to TX EML OMN frame
+ * @ESR_TRANSITION_FAILED_TX_TIMEOUT: timeout on the EML OMN frame
+ * @ESR_TRANSITION_FAILED_BEACONS_NOT_HEARD: can't get a beacon on the new link
+ */
+enum iwl_esr_trans_fail_code {
+ ESR_TRANS_FAILED_TX_STATUS_ERROR,
+ ESR_TRANSITION_FAILED_TX_TIMEOUT,
+ ESR_TRANSITION_FAILED_BEACONS_NOT_HEARD,
+};
+
+/**
+ * struct iwl_esr_trans_fail_notif - FW reports a failure in EMLSR transition
+ *
+ * @link_id: the link_id that still works after the failure
+ * @activation: true if the link was activated, false otherwise
+ * @err_code: see &enum iwl_esr_trans_fail_code
+ */
+struct iwl_esr_trans_fail_notif {
+ __le32 link_id;
+ __le32 activation;
+ __le32 err_code;
+} __packed; /* ESR_TRANSITION_FAILED_NTFY_API_S_VER_1 */
+
#endif /* __iwl_fw_api_mac_cfg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index bcbbf8c4a297..977ca4ac166d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -16,7 +16,7 @@
#define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1)
#define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2)
-#define IWL_MVM_STATION_COUNT_MAX 16
+#define IWL_STATION_COUNT_MAX 16
#define IWL_MVM_INVALID_STA 0xFF
enum iwl_ac {
@@ -378,7 +378,7 @@ struct iwl_missed_beacons_notif_ver_3 {
} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
/**
- * struct iwl_missed_beacons_notif - information on missed beacons
+ * struct iwl_missed_beacons_notif_v4 - information on missed beacons
* ( MISSED_BEACONS_NOTIFICATION = 0xa2 )
* @link_id: fw link ID
* @consec_missed_beacons_since_last_rx: number of consecutive missed
@@ -387,7 +387,7 @@ struct iwl_missed_beacons_notif_ver_3 {
* @num_expected_beacons: number of expected beacons
* @num_recvd_beacons: number of received beacons
*/
-struct iwl_missed_beacons_notif {
+struct iwl_missed_beacons_notif_v4 {
__le32 link_id;
__le32 consec_missed_beacons_since_last_rx;
__le32 consec_missed_beacons;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 6e6a92d173cc..df0680eae30c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -285,18 +285,12 @@ enum iwl_dev_tx_power_cmd_mode {
* @set_mode: see &enum iwl_dev_tx_power_cmd_mode
* @mac_context_id: id of the mac ctx for which we are reducing TX power.
* @pwr_restriction: TX power restriction in 1/8 dBms.
- * @dev_24: device TX power restriction in 1/8 dBms
- * @dev_52_low: device TX power restriction upper band - low
- * @dev_52_high: device TX power restriction upper band - high
*/
struct iwl_dev_tx_power_common {
__le32 set_mode;
__le32 mac_context_id;
__le16 pwr_restriction;
- __le16 dev_24;
- __le16 dev_52_low;
- __le16 dev_52_high;
-};
+} __packed;
/**
* struct iwl_dev_tx_power_cmd_v3 - TX power reduction command version 3
@@ -412,8 +406,20 @@ struct iwl_dev_tx_power_cmd_v8 {
__le32 tpc_vlp_backoff_level;
} __packed; /* TX_REDUCED_POWER_API_S_VER_8 */
+/*
+ * @dev_24: device TX power restriction in 1/8 dBms
+ * @dev_52_low: device TX power restriction upper band - low
+ * @dev_52_high: device TX power restriction upper band - high
+ */
+struct iwl_dev_tx_power_cmd_per_band {
+ __le16 dev_24;
+ __le16 dev_52_low;
+ __le16 dev_52_high;
+} __packed;
+
/**
- * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
+ * struct iwl_dev_tx_power_cmd_v3_v8 - TX power reduction command (multiversion)
+ * @per_band: per band restrictions
* @common: common part of the command
* @v3: version 3 part of the command
* @v4: version 4 part of the command
@@ -422,8 +428,9 @@ struct iwl_dev_tx_power_cmd_v8 {
* @v7: version 7 part of the command
* @v8: version 8 part of the command
*/
-struct iwl_dev_tx_power_cmd {
+struct iwl_dev_tx_power_cmd_v3_v8 {
struct iwl_dev_tx_power_common common;
+ struct iwl_dev_tx_power_cmd_per_band per_band;
union {
struct iwl_dev_tx_power_cmd_v3 v3;
struct iwl_dev_tx_power_cmd_v4 v4;
@@ -434,6 +441,60 @@ struct iwl_dev_tx_power_cmd {
};
};
+/**
+ * struct iwl_dev_tx_power_cmd_v9 - TX power reduction cmd
+ * @reserved: reserved (padding)
+ * @per_chain: per chain restrictions
+ * @per_chain_restriction_changed: whether per_chain_restriction has changed
+ * from last command. used if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ * note: if not changed, the command is used for keep alive only.
+ * @reserved1: reserved (padding)
+ * @timer_period: timer in milliseconds. if expires FW will change to default
+ * BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER
+ */
+struct iwl_dev_tx_power_cmd_v9 {
+ __le16 reserved;
+ __le16 per_chain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1];
+ u8 per_chain_restriction_changed;
+ u8 reserved1[3];
+ __le32 timer_period;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_9 */
+
+/**
+ * struct iwl_dev_tx_power_cmd_v10 - TX power reduction cmd
+ * @per_chain: per chain restrictions
+ * @per_chain_restriction_changed: whether per_chain_restriction has changed
+ * from last command. used if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ * note: if not changed, the command is used for keep alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. if expires FW will change to default
+ * BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER
+ * @flags: reduce power flags.
+ */
+struct iwl_dev_tx_power_cmd_v10 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ u8 per_chain_restriction_changed;
+ u8 reserved;
+ __le32 timer_period;
+ __le32 flags;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_10 */
+
+/*
+ * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
+ * @common: common part of the command
+ * @v9: version 9 part of the command
+ * @v10: version 10 part of the command
+ */
+struct iwl_dev_tx_power_cmd {
+ struct iwl_dev_tx_power_common common;
+ union {
+ struct iwl_dev_tx_power_cmd_v9 v9;
+ struct iwl_dev_tx_power_cmd_v10 v10;
+ };
+} __packed; /* TX_REDUCED_POWER_API_S_VER_9_VER10 */
+
#define IWL_NUM_GEO_PROFILES 3
#define IWL_NUM_GEO_PROFILES_V3 8
#define IWL_NUM_BANDS_PER_CHAIN_V1 2
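The rebuilt iwl_dev_tx_power_cmd keeps the multiversion layout of the
v3..v8 variant it splits off from: one common prefix shared by every
firmware API version plus a union of version-specific tails, with the
driver sending common + the tail matching the firmware's reported
version. A compact sketch of the layout and length computation; the
field widths are placeholders, not the real API:

#include <stdint.h>
#include <stdio.h>

struct cmd_common {
	uint32_t set_mode;
	uint16_t pwr_restriction;
} __attribute__((packed));

struct cmd_v9 {
	uint16_t per_chain[2][5];
	uint32_t timer_period;
} __attribute__((packed));

struct cmd_v10 {
	uint16_t per_chain[4][10];
	uint32_t timer_period;
	uint32_t flags;
} __attribute__((packed));

struct cmd {
	struct cmd_common common;
	union {
		struct cmd_v9 v9;
		struct cmd_v10 v10;
	};
} __attribute__((packed));

int main(void)
{
	int ver = 9;	/* would come from the firmware's API version */
	size_t len = sizeof(struct cmd_common) +
		     (ver == 9 ? sizeof(struct cmd_v9)
			       : sizeof(struct cmd_v10));

	printf("send %zu of %zu bytes\n", len, sizeof(struct cmd));
	return 0;
}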
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 8598031567bb..f486d624500b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -731,39 +731,46 @@ enum iwl_umac_scan_general_params_flags2 {
* struct iwl_scan_channel_cfg_umac
* @flags: bitmap - 0-19: directed scan to i'th ssid.
* @channel_num: channel number 1-13 etc.
- * @band: band of channel: 0 for 2GHz, 1 for 5GHz
- * @iter_count: repetition count for the channel.
- * @iter_interval: interval between two scan iterations on one channel.
+ * @v1: command version 1
+ * @v1.iter_count: repetition count for the channel.
+ * @v1.iter_interval: interval between two scan iterations on one channel.
+ * @v2: command versions 2-4
+ * @v2.band: band of channel: 0 for 2GHz, 1 for 5GHz
+ * @v2.iter_count: repetition count for the channel.
+ * @v2.iter_interval: interval between two scan iterations on one channel.
+ * @v5: command versions 5 and up
+ * @v5.iter_count: repetition count for the channel.
+ * @v5.iter_interval: interval between two scan iterations on one channel.
+ * @v5.psd_20: highest PSD value for all APs known so far
+ * on this channel.
*/
struct iwl_scan_channel_cfg_umac {
#define IWL_CHAN_CFG_FLAGS_BAND_POS 30
__le32 flags;
+ u8 channel_num;
/* All versions are of the same size, so use a union without adjusting
* the command size later
*/
union {
struct {
- u8 channel_num;
u8 iter_count;
__le16 iter_interval;
- } v1; /* SCAN_CHANNEL_CONFIG_API_S_VER_1 */
+ } __packed v1; /* SCAN_CHANNEL_CONFIG_API_S_VER_1 */
struct {
- u8 channel_num;
u8 band;
u8 iter_count;
u8 iter_interval;
- } v2; /* SCAN_CHANNEL_CONFIG_API_S_VER_2
- * SCAN_CHANNEL_CONFIG_API_S_VER_3
- * SCAN_CHANNEL_CONFIG_API_S_VER_4
- */
+ } __packed v2; /* SCAN_CHANNEL_CONFIG_API_S_VER_2
+ * SCAN_CHANNEL_CONFIG_API_S_VER_3
+ * SCAN_CHANNEL_CONFIG_API_S_VER_4
+ */
struct {
- u8 channel_num;
u8 psd_20;
u8 iter_count;
u8 iter_interval;
- } v5; /* SCAN_CHANNEL_CONFIG_API_S_VER_5 */
- };
+ } __packed v5; /* SCAN_CHANNEL_CONFIG_API_S_VER_5 */
+ } __packed;
} __packed;
/**
@@ -1133,6 +1140,19 @@ struct iwl_umac_scan_abort {
} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
/**
+ * enum iwl_umac_scan_abort_status
+ *
+ * @IWL_UMAC_SCAN_ABORT_STATUS_SUCCESS: scan was successfully aborted
+ * @IWL_UMAC_SCAN_ABORT_STATUS_IN_PROGRESS: scan abort is in progress
+ * @IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND: nothing to abort
+ */
+enum iwl_umac_scan_abort_status {
+ IWL_UMAC_SCAN_ABORT_STATUS_SUCCESS = 0,
+ IWL_UMAC_SCAN_ABORT_STATUS_IN_PROGRESS,
+ IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND,
+};
+
+/**
* struct iwl_umac_scan_complete
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @last_schedule: last scheduling line
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
index 2271b19213fa..0a9f14fb04be 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020 - 2021, 2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020 - 2021, 2023 - 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -327,14 +327,14 @@ struct mvm_statistics_load {
__le32 air_time[MAC_INDEX_AUX];
__le32 byte_count[MAC_INDEX_AUX];
__le32 pkt_count[MAC_INDEX_AUX];
- u8 avg_energy[IWL_MVM_STATION_COUNT_MAX];
+ u8 avg_energy[IWL_STATION_COUNT_MAX];
} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_3 */
struct mvm_statistics_load_v1 {
__le32 air_time[NUM_MAC_INDEX];
__le32 byte_count[NUM_MAC_INDEX];
__le32 pkt_count[NUM_MAC_INDEX];
- u8 avg_energy[IWL_MVM_STATION_COUNT_MAX];
+ u8 avg_energy[IWL_STATION_COUNT_MAX];
} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */
struct mvm_statistics_rx {
@@ -594,7 +594,7 @@ struct iwl_stats_ntfy_per_sta {
} __packed; /* STATISTICS_NTFY_PER_STA_API_S_VER_1 */
#define IWL_STATS_MAX_PHY_OPERATIONAL 3
-#define IWL_STATS_MAX_FW_LINKS (IWL_MVM_FW_MAX_LINK_ID + 1)
+#define IWL_STATS_MAX_FW_LINKS (IWL_FW_MAX_LINK_ID + 1)
/**
* struct iwl_system_statistics_notif_oper
@@ -608,7 +608,7 @@ struct iwl_system_statistics_notif_oper {
__le32 time_stamp;
struct iwl_stats_ntfy_per_link per_link[IWL_STATS_MAX_FW_LINKS];
struct iwl_stats_ntfy_per_phy per_phy[IWL_STATS_MAX_PHY_OPERATIONAL];
- struct iwl_stats_ntfy_per_sta per_sta[IWL_MVM_STATION_COUNT_MAX];
+ struct iwl_stats_ntfy_per_sta per_sta[IWL_STATION_COUNT_MAX];
} __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_API_S_VER_3 */
/**
@@ -651,7 +651,7 @@ struct iwl_statistics_operational_ntfy {
__le32 flags;
struct iwl_stats_ntfy_per_mac per_mac[MAC_INDEX_AUX];
struct iwl_stats_ntfy_per_phy per_phy[IWL_STATS_MAX_PHY_OPERATIONAL];
- struct iwl_stats_ntfy_per_sta per_sta[IWL_MVM_STATION_COUNT_MAX];
+ struct iwl_stats_ntfy_per_sta per_sta[IWL_STATION_COUNT_MAX];
__le64 rx_time;
__le64 tx_time;
__le64 on_time_rf;
@@ -699,7 +699,7 @@ struct iwl_statistics_operational_ntfy_ver_14 {
__le64 tx_time;
__le64 on_time_rf;
__le64 on_time_scan;
- __le32 average_energy[IWL_MVM_STATION_COUNT_MAX];
+ __le32 average_energy[IWL_STATION_COUNT_MAX];
__le32 reserved;
} __packed; /* STATISTICS_OPERATIONAL_NTFY_API_S_VER_14 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index c5277e2f8cd4..f3bf2e087a40 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -486,7 +486,7 @@ struct agg_tx_status {
#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
/**
- * struct iwl_mvm_tx_resp_v3 - notifies that fw is TXing a packet
+ * struct iwl_tx_resp_v3 - notifies that fw is TXing a packet
* ( REPLY_TX = 0x1c )
* @frame_count: 1 no aggregation, >1 aggregation
* @bt_kill_count: num of times blocked by bluetooth (unused for agg)
@@ -517,7 +517,7 @@ struct agg_tx_status {
* After the array of statuses comes the SSN of the SCD. Look at
* %iwl_mvm_get_scd_ssn for more details.
*/
-struct iwl_mvm_tx_resp_v3 {
+struct iwl_tx_resp_v3 {
u8 frame_count;
u8 bt_kill_count;
u8 failure_rts;
@@ -543,7 +543,7 @@ struct iwl_mvm_tx_resp_v3 {
} __packed; /* TX_RSP_API_S_VER_3 */
/**
- * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet
+ * struct iwl_tx_resp - notifies that fw is TXing a packet
* ( REPLY_TX = 0x1c )
* @frame_count: 1 no aggregation, >1 aggregation
* @bt_kill_count: num of times blocked by bluetooth (unused for agg)
@@ -575,7 +575,7 @@ struct iwl_mvm_tx_resp_v3 {
* After the array of statuses comes the SSN of the SCD. Look at
* %iwl_mvm_get_scd_ssn for more details.
*/
-struct iwl_mvm_tx_resp {
+struct iwl_tx_resp {
u8 frame_count;
u8 bt_kill_count;
u8 failure_rts;
@@ -823,7 +823,7 @@ struct iwl_mac_beacon_cmd {
*/
struct iwl_beacon_notif {
- struct iwl_mvm_tx_resp beacon_notify_hdr;
+ struct iwl_tx_resp beacon_notify_hdr;
__le64 tsf;
__le32 ibss_mgr_status;
} __packed;
@@ -836,7 +836,7 @@ struct iwl_beacon_notif {
* @gp2: last beacon time in gp2
*/
struct iwl_extended_beacon_notif_v5 {
- struct iwl_mvm_tx_resp beacon_notify_hdr;
+ struct iwl_tx_resp beacon_notify_hdr;
__le64 tsf;
__le32 ibss_mgr_status;
__le32 gp2;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index fa57df336785..fb2ea38e89ac 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -3348,7 +3348,7 @@ void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
{
int ret __maybe_unused = 0;
- if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
+ if (!iwl_trans_fw_running(fwrt->trans))
return;
if (fw_has_capa(&fwrt->fw->ucode_capa,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
index 560a91998cc4..4d9a1f83ef8c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -634,3 +634,19 @@ int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
GET_BIOS_TABLE(dsm, fwrt, func, value);
}
IWL_EXPORT_SYMBOL(iwl_bios_get_dsm);
+
+bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc)
+{
+ /* Some kind of regulatory mess means we currently need to disallow
+ * puncturing in the US and Canada unless it is enabled in BIOS.
+ */
+ switch (mcc) {
+ case IWL_MCC_US:
+ return puncturing & IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK;
+ case IWL_MCC_CANADA:
+ return puncturing & IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK;
+ default:
+ return true;
+ }
+}
+IWL_EXPORT_SYMBOL(iwl_puncturing_is_allowed_in_bios);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
index e2c056f483c1..81787501d4a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -45,6 +45,8 @@
#define IWL_WTAS_ENABLE_IEC_MSK 0x4
#define IWL_WTAS_USA_UHB_MSK BIT(16)
+#define BIOS_MCC_CHINA 0x434e
+
/*
* The profile for revision 2 is a superset of revision 1, which is in
* turn a superset of revision 0. So we can store all revisions
@@ -217,4 +219,6 @@ static inline u32 iwl_bios_get_ppag_flags(const u32 ppag_modes,
return ppag_modes & (ppag_ver < 3 ? IWL_PPAG_ETSI_CHINA_MASK :
IWL_PPAG_REV3_MASK);
}
+
+bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc);
#endif /* __fw_regulatory_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index fb982d4fe851..091fb6fd7c78 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -638,7 +638,7 @@ int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
goto out;
}
- if (data->mcc != UEFI_MCC_CHINA) {
+ if (data->mcc != BIOS_MCC_CHINA) {
ret = -EINVAL;
IWL_DEBUG_RADIO(fwrt, "UEFI WRDD is supported only for CN\n");
goto out;
@@ -729,3 +729,32 @@ out:
kfree(data);
return ret;
}
+
+int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_puncturing_data *data;
+ /* default to disabled if the UEFI variable cannot be read or its
+ * revision is not supported
+ */
+ int puncturing = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans,
+ IWL_UEFI_PUNCTURING_NAME,
+ "UefiCnvWlanPuncturing",
+ sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return puncturing;
+
+ if (data->revision != IWL_UEFI_PUNCTURING_REVISION) {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI PUNCTURING rev:%d\n",
+ data->revision);
+ } else {
+ puncturing = data->puncturing & IWL_UEFI_PUNCTURING_REV0_MASK;
+ IWL_DEBUG_RADIO(fwrt, "Loaded puncturing bits from UEFI: %d\n",
+ puncturing);
+ }
+
+ kfree(data);
+ return puncturing;
+}
+IWL_EXPORT_SYMBOL(iwl_uefi_get_puncturing);
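
For orientation, a sketch of how the two new helpers compose; the helper calls and flag handling mirror the mac80211.c hunk later in this patch, while the wrapper function itself is hypothetical:

/* Hypothetical glue, for illustration only. */
static void example_apply_bios_puncturing(struct iwl_mvm *mvm, u16 mcc)
{
	/* read and cache the UEFI puncturing bits once, e.g. at init */
	mvm->bios_enable_puncturing = iwl_uefi_get_puncturing(&mvm->fwrt);

	/* gate puncturing on the cached bits whenever the MCC changes */
	if (!iwl_puncturing_is_allowed_in_bios(mvm->bios_enable_puncturing,
					       mcc))
		ieee80211_hw_set(mvm->hw, DISALLOW_PUNCTURING);
	else
		__clear_bit(IEEE80211_HW_DISALLOW_PUNCTURING,
			    mvm->hw->flags);
}
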
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index 1f8884ca8997..e525d449e656 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -22,6 +22,7 @@
#define IWL_UEFI_ECKV_NAME L"UefiCnvWlanECKV"
#define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg"
#define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM"
+#define IWL_UEFI_PUNCTURING_NAME L"UefiCnvWlanPuncturing"
#define IWL_SGOM_MAP_SIZE 339
@@ -38,6 +39,7 @@
#define IWL_UEFI_ECKV_REVISION 0
#define IWL_UEFI_WBEM_REVISION 0
#define IWL_UEFI_DSM_REVISION 4
+#define IWL_UEFI_PUNCTURING_REVISION 0
struct pnvm_sku_package {
u8 rev;
@@ -149,8 +151,6 @@ struct uefi_cnv_var_splc {
u32 default_pwr_limit;
} __packed;
-#define UEFI_MCC_CHINA 0x434e
-
/* struct uefi_cnv_var_wrdd - WRDD table as defined in UEFI
* @revision: the revision of the table
* @mcc: country identifier as defined in ISO/IEC 3166-1 Alpha 2 code
@@ -194,6 +194,25 @@ struct uefi_cnv_wlan_wbem_data {
u32 wbem_320mhz_per_mcc;
} __packed;
+enum iwl_uefi_cnv_puncturing_flags {
+ IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK = BIT(0),
+ IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK = BIT(1),
+};
+
+#define IWL_UEFI_PUNCTURING_REV0_MASK (IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK | \
+ IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK)
+/**
+ * struct uefi_cnv_var_puncturing_data - controls channel
+ * puncturing for a few countries.
+ * @revision: the revision of the table
+ * @puncturing: enablement of channel puncturing per mcc
+ * see &enum iwl_uefi_cnv_puncturing_flags.
+ */
+struct uefi_cnv_var_puncturing_data {
+ u8 revision;
+ u32 puncturing;
+} __packed;
+
/*
* This is known to be broken on v4.19 and to work on v5.4. Until we
* figure out why this is the case and how to make it work, simply
@@ -224,6 +243,7 @@ int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
int iwl_uefi_get_uats_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt);
#else /* CONFIG_EFI */
static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
{
@@ -320,5 +340,11 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans,
{
return 0;
}
+
+static inline
+int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt)
+{
+ return 0;
+}
#endif /* CONFIG_EFI */
#endif /* __iwl_fw_uefi__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index b2abd4fd1944..34c91deca57b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -504,6 +504,7 @@ extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_gl_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_sc_trans_cfg;
extern const char iwl9162_name[];
extern const char iwl9260_name[];
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index aaaabd67f959..2abfc986701f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1205,7 +1205,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
if (le32_to_cpup((const __le32 *)tlv_data) >
- IWL_MVM_STATION_COUNT_MAX) {
+ IWL_STATION_COUNT_MAX) {
IWL_ERR(drv,
"%d is an invalid number of station\n",
le32_to_cpup((const __le32 *)tlv_data));
@@ -1479,7 +1479,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
fw->ucode_capa.standard_phy_calibration_size =
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
- fw->ucode_capa.num_stations = IWL_MVM_STATION_COUNT_MAX;
+ fw->ucode_capa.num_stations = IWL_STATION_COUNT_MAX;
fw->ucode_capa.num_beacons = 1;
/* dump all fw memory areas by default */
fw->dbg.dump_mask = 0xffffffff;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index 595fa6ddf084..8ef5ed2db051 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -85,6 +85,10 @@ struct iwl_cfg;
* May sleep
* @wimax_active: invoked when WiMax becomes active. May sleep
* @time_point: called when transport layer wants to collect debug data
+ * @device_powered_off: called upon resume from hibernation, among other
+ * cases. The op_mode must reset its internal state because the device
+ * did not survive the system state transition: the firmware is no
+ * longer running, etc.
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -107,6 +111,7 @@ struct iwl_op_mode_ops {
void (*time_point)(struct iwl_op_mode *op_mode,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data);
+ void (*device_powered_off)(struct iwl_op_mode *op_mode);
};
int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
@@ -204,4 +209,11 @@ static inline void iwl_op_mode_time_point(struct iwl_op_mode *op_mode,
op_mode->ops->time_point(op_mode, tp_id, tp_data);
}
+static inline void iwl_op_mode_device_powered_off(struct iwl_op_mode *op_mode)
+{
+ if (!op_mode || !op_mode->ops || !op_mode->ops->device_powered_off)
+ return;
+ op_mode->ops->device_powered_off(op_mode);
+}
+
#endif /* __iwl_op_mode_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 6148acbac6af..e95ffe303547 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -386,7 +386,6 @@ struct iwl_dump_sanitize_ops {
* @cmd_queue: the index of the command queue.
* Must be set before start_fw.
* @cmd_fifo: the fifo for host commands
- * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
* @no_reclaim_cmds: Some devices erroneously don't set the
* SEQ_RX_FRAME bit on some notifications, this is the
* list of such notifications to filter. Max length is
@@ -412,7 +411,6 @@ struct iwl_trans_config {
u8 cmd_queue;
u8 cmd_fifo;
- unsigned int cmd_q_wdg_timeout;
const u8 *no_reclaim_cmds;
unsigned int n_no_reclaim_cmds;
@@ -1128,8 +1126,8 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
/* prevent double restarts due to the same erroneous FW */
if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
- iwl_op_mode_nic_error(trans->op_mode, sync);
trans->state = IWL_TRANS_NO_FW;
+ iwl_op_mode_nic_error(trans->op_mode, sync);
}
}
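
Reordering those two lines matters because the error path can consult the transport state. A simplified sketch of the hazard, assuming a handler that performs the iwl_trans_fw_running() check seen in the dbg.c hunk above (the handler body itself is an assumption):

static void example_nic_error_handler(struct iwl_trans *trans)
{
	if (!iwl_trans_fw_running(trans))
		return;	/* new order: state is already IWL_TRANS_NO_FW, bail out */

	/* old order: the state still claimed the firmware was alive, so
	 * code reaching this point could try to talk to a firmware that
	 * has just crashed */
}
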
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
index 4900de3cc0d3..2081775e0ec9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
+++ b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
@@ -283,6 +283,16 @@ struct iwl_mei_colloc_info {
u8 bssid[ETH_ALEN];
};
+/**
+ * enum iwl_mei_sap_version - SAP version
+ * @IWL_MEI_SAP_VERSION_3: SAP version 3
+ * @IWL_MEI_SAP_VERSION_4: SAP version 4
+ */
+enum iwl_mei_sap_version {
+ IWL_MEI_SAP_VERSION_3 = 3,
+ IWL_MEI_SAP_VERSION_4 = 4,
+};
+
/*
* struct iwl_mei_ops - driver's operations called by iwlmei
* Operations will not be called more than once concurrently.
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
index 1dd9106c6513..dce0b7cf7b26 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2021-2023 Intel Corporation
+ * Copyright (C) 2021-2024 Intel Corporation
*/
#include <linux/etherdevice.h>
@@ -58,7 +58,6 @@ bool iwl_mei_is_connected(void)
}
EXPORT_SYMBOL_GPL(iwl_mei_is_connected);
-#define SAP_VERSION 3
#define SAP_CONTROL_BLOCK_ID 0x21504153 /* SAP! in ASCII */
struct iwl_sap_q_ctrl_blk {
@@ -110,16 +109,19 @@ struct iwl_sap_shared_mem_ctrl_blk {
#define SAP_H2M_DATA_Q_SZ 48256
#define SAP_M2H_DATA_Q_SZ 24128
-#define SAP_H2M_NOTIF_Q_SZ 2240
+#define SAP_H2M_NOTIF_Q_SZ_VER3 2240
+#define SAP_H2M_NOTIF_Q_SZ_VER4 32768
#define SAP_M2H_NOTIF_Q_SZ 62720
-#define _IWL_MEI_SAP_SHARED_MEM_SZ \
+#define _IWL_MEI_SAP_SHARED_MEM_SZ_VER3 \
(sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \
- SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ + \
+ SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ_VER3 + \
SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4)
-#define IWL_MEI_SAP_SHARED_MEM_SZ \
- (roundup(_IWL_MEI_SAP_SHARED_MEM_SZ, PAGE_SIZE))
+#define _IWL_MEI_SAP_SHARED_MEM_SZ_VER4 \
+ (sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \
+ SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ_VER4 + \
+ SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4)
struct iwl_mei_shared_mem_ptrs {
struct iwl_sap_shared_mem_ctrl_blk *ctrl;
@@ -206,6 +208,7 @@ struct iwl_mei {
* @mac_address: interface MAC address.
* @nvm_address: NVM MAC address.
* @priv: A pointer to iwlwifi.
+ * @sap_version: the SAP version to use, see &enum iwl_mei_sap_version.
*
 * This is used to cache the configurations coming from iwlwifi. The data
* is cached here so that we can buffer the configuration even if we don't have
@@ -220,6 +223,7 @@ struct iwl_mei_cache {
u16 mcc;
u8 mac_address[6];
u8 nvm_address[6];
+ enum iwl_mei_sap_version sap_version;
void *priv;
};
@@ -238,14 +242,17 @@ static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev)
#define HBM_DMA_BUF_ID_WLAN 1
-static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
+static int iwl_mei_alloc_mem_for_version(struct mei_cl_device *cldev,
+ enum iwl_mei_sap_version version)
{
struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
+ u32 mem_size = roundup(version == IWL_MEI_SAP_VERSION_4 ?
+ _IWL_MEI_SAP_SHARED_MEM_SZ_VER4 :
+ _IWL_MEI_SAP_SHARED_MEM_SZ_VER3, PAGE_SIZE);
- mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN,
- IWL_MEI_SAP_SHARED_MEM_SZ);
-
+ iwl_mei_cache.sap_version = version;
+ mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN, mem_size);
if (IS_ERR(mem->ctrl)) {
int ret = PTR_ERR(mem->ctrl);
@@ -254,11 +261,30 @@ static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
return ret;
}
- memset(mem->ctrl, 0, IWL_MEI_SAP_SHARED_MEM_SZ);
+ memset(mem->ctrl, 0, mem_size);
return 0;
}
+static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
+{
+ int ret;
+
+ /*
+ * SAP version 4 uses a larger Host to MEI notif queue.
+ * Since it is unknown at this stage which SAP version is used by the
+ * CSME firmware on this platform, try to allocate for version 4 first.
+ * If the CSME firmware uses version 3, this allocation is expected to
+ * fail because the CSME firmware allocated less memory for our driver.
+ */
+ ret = iwl_mei_alloc_mem_for_version(cldev, IWL_MEI_SAP_VERSION_4);
+ if (ret)
+ ret = iwl_mei_alloc_mem_for_version(cldev,
+ IWL_MEI_SAP_VERSION_3);
+
+ return ret;
+}
+
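
The queue payloads make the version difference concrete; worked sums, leaving out the ctrl block whose size depends on struct iwl_sap_shared_mem_ctrl_blk:

/* before the PAGE_SIZE roundup in iwl_mei_alloc_mem_for_version():
 *   VER3: 48256 + 2240  + 24128 + 62720 + 4 = 137348 bytes (~134 KiB)
 *   VER4: 48256 + 32768 + 24128 + 62720 + 4 = 167876 bytes (~164 KiB)
 * a version 3 CSME firmware, having provisioned the smaller region, is
 * expected to reject the larger version 4 mapping; that is exactly the
 * probe-and-fall-back behavior implemented above */
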
static void iwl_mei_init_shared_mem(struct iwl_mei *mei)
{
struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
@@ -277,7 +303,9 @@ static void iwl_mei_init_shared_mem(struct iwl_mei *mei)
h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
cpu_to_le32(SAP_H2M_DATA_Q_SZ);
h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
- cpu_to_le32(SAP_H2M_NOTIF_Q_SZ);
+ iwl_mei_cache.sap_version == IWL_MEI_SAP_VERSION_3 ?
+ cpu_to_le32(SAP_H2M_NOTIF_Q_SZ_VER3) :
+ cpu_to_le32(SAP_H2M_NOTIF_Q_SZ_VER4);
m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
cpu_to_le32(SAP_M2H_DATA_Q_SZ);
m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
@@ -647,7 +675,7 @@ iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
return;
}
- if (rsp->supported_version != SAP_VERSION) {
+ if (rsp->supported_version != iwl_mei_cache.sap_version) {
dev_err(&cldev->dev,
"didn't get the expected version: got %d\n",
rsp->supported_version);
@@ -1281,7 +1309,7 @@ static int iwl_mei_send_start(struct mei_cl_device *cldev)
.hdr.type = cpu_to_le32(SAP_ME_MSG_START),
.hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
.hdr.len = cpu_to_le32(sizeof(msg)),
- .supported_versions[0] = SAP_VERSION,
+ .supported_versions[0] = iwl_mei_cache.sap_version,
.init_data_seq_num = cpu_to_le16(0x100),
.init_notif_seq_num = cpu_to_le16(0x800),
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 8873904f51ec..59751f123571 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -13,7 +13,7 @@ iwlmvm-y += ptp.o
iwlmvm-y += time-sync.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
-iwlmvm-$(CONFIG_PM) += d3.o
+iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
iwlmvm-$(CONFIG_IWLMEI) += vendor-cmd.o
subdir-ccflags-y += -I $(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index ad3e14a0d043..b607961970e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -208,7 +208,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
}
struct iwl_bt_iterator_data {
- struct iwl_bt_coex_profile_notif *notif;
+ struct iwl_bt_coex_prof_old_notif *notif;
struct iwl_mvm *mvm;
struct ieee80211_chanctx_conf *primary;
struct ieee80211_chanctx_conf *secondary;
@@ -266,10 +266,26 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
bool have_wifi_loss_rate =
iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
- BT_PROFILE_NOTIFICATION, 0) > 4;
+ BT_PROFILE_NOTIFICATION, 0) > 4 ||
+ iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP,
+ PROFILE_NOTIF, 0) >= 1;
+ u8 wifi_loss_mid_high_rssi;
+ u8 wifi_loss_low_rssi;
u8 wifi_loss_rate;
- if (mvm->last_bt_notif.wifi_loss_low_rssi == BT_OFF)
+ if (iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP,
+ PROFILE_NOTIF, 0) >= 1) {
+ /* For now, we consider 2.4 GHz band / ANT_A only */
+ wifi_loss_mid_high_rssi =
+ mvm->last_bt_wifi_loss.wifi_loss_mid_high_rssi[PHY_BAND_24][0];
+ wifi_loss_low_rssi =
+ mvm->last_bt_wifi_loss.wifi_loss_low_rssi[PHY_BAND_24][0];
+ } else {
+ wifi_loss_mid_high_rssi = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+ wifi_loss_low_rssi = mvm->last_bt_notif.wifi_loss_low_rssi;
+ }
+
+ if (wifi_loss_low_rssi == BT_OFF)
return true;
if (primary)
@@ -286,20 +302,20 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
* we will get an update on this and exit eSR.
*/
if (!link_rssi)
- wifi_loss_rate = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+ wifi_loss_rate = wifi_loss_mid_high_rssi;
else if (mvmvif->esr_active)
/* RSSI needs to get really low to disable eSR... */
wifi_loss_rate =
link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ?
- mvm->last_bt_notif.wifi_loss_low_rssi :
- mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+ wifi_loss_low_rssi :
+ wifi_loss_mid_high_rssi;
else
/* ...And really high before we enable it back */
wifi_loss_rate =
link_rssi <= -IWL_MVM_BT_COEX_ENABLE_ESR_THRESH ?
- mvm->last_bt_notif.wifi_loss_low_rssi :
- mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+ wifi_loss_low_rssi :
+ wifi_loss_mid_high_rssi;
return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH;
}
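
The two thresholds implement hysteresis. A compact sketch with hypothetical cutoff values; the real IWL_MVM_BT_COEX_DISABLE_ESR_THRESH and IWL_MVM_BT_COEX_ENABLE_ESR_THRESH live in constants.h and are not shown in this hunk:

/* Sketch only; 69 and 63 are made-up stand-ins for the real constants. */
static u8 example_pick_wifi_loss(bool esr_active, int link_rssi,
				 u8 loss_low, u8 loss_mid_high)
{
	/* while eSR is active the RSSI must drop very low to switch to
	 * the pessimistic table; while inactive it must climb well above
	 * that before the optimistic table is used again, and the gap
	 * between the two cutoffs prevents flapping around a single one */
	int cutoff = esr_active ? 69 : 63;

	return link_rssi <= -cutoff ? loss_low : loss_mid_high;
}
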
@@ -509,6 +525,35 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id);
}
+/* must be called under rcu_read_lock */
+static void iwl_mvm_bt_coex_notif_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = _data;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ for (int link_id = 0;
+ link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+ link_id++) {
+ struct ieee80211_bss_conf *link_conf =
+ rcu_dereference_check(vif->link_conf[link_id],
+ lockdep_is_held(&mvm->mutex));
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ /* link_conf is NULL for links that are not set up */
+ if (!link_conf)
+ continue;
+
+ chanctx_conf = rcu_dereference_check(link_conf->chanctx_conf,
+ lockdep_is_held(&mvm->mutex));
+
+ if (!chanctx_conf ||
+ chanctx_conf->def.chan->band != NL80211_BAND_2GHZ)
+ continue;
+
+ iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
+ }
+}
+
static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
{
struct iwl_bt_iterator_data data = {
@@ -591,11 +636,11 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
}
}
-void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
+void iwl_mvm_rx_bt_coex_old_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
+ struct iwl_bt_coex_prof_old_notif *notif = (void *)pkt->data;
IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
@@ -612,6 +657,22 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
iwl_mvm_bt_coex_notif_handle(mvm);
}
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ const struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ const struct iwl_bt_coex_profile_notif *notif = (const void *)pkt->data;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ mvm->last_bt_wifi_loss = *notif;
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bt_coex_notif_iterator,
+ mvm);
+}
+
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data rssi_event)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index c4c1e67b9ac7..ddf484027d4f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -16,6 +16,8 @@
#define IWL_MVM_BT_COEX_WIFI_LOSS_THRESH 0
#define IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC 30
#define IWL_MVM_TPT_COUNT_WINDOW_SEC 5
+#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS 5
+#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH 11
#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
@@ -109,7 +111,7 @@
#define IWL_MVM_FTM_INITIATOR_SECURE_LTF false
#define IWL_MVM_FTM_RESP_NDP_SUPPORT true
#define IWL_MVM_FTM_RESP_LMR_FEEDBACK_SUPPORT true
-#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 5
+#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 7
#define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000
#define IWL_MVM_D3_DEBUG false
#define IWL_MVM_USE_TWT true
@@ -125,7 +127,6 @@
#define IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */
#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16
#define IWL_MVM_AUTO_EML_ENABLE true
-#define IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH 7
#define IWL_MVM_HIGH_RSSI_THRESH_20MHZ -67
#define IWL_MVM_LOW_RSSI_THRESH_20MHZ -71
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index b4d650583ac2..49a6aff42376 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -3439,6 +3439,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
mutex_lock(&mvm->mutex);
+ /* Apparently the device went away and device_powered_off() was called;
+ * don't even try to read the rt_status, as the device is currently
+ * inaccessible.
+ */
+ if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
+ IWL_INFO(mvm,
+ "Can't resume, device_powered_off() was called during wowlan\n");
+ goto err;
+ }
+
mvm->last_reset_or_resume_time_jiffies = jiffies;
/* get the BSS vif pointer again */
@@ -3758,7 +3768,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
}
const struct file_operations iwl_dbgfs_d3_test_ops = {
- .llseek = no_llseek,
.open = iwl_mvm_d3_test_open,
.read = iwl_mvm_d3_test_read,
.release = iwl_mvm_d3_test_release,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index afd90a52d4ec..55245f913286 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -772,6 +772,7 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_ftm_iter_data target;
target.bssid = bssid;
+ target.cipher = cipher;
ieee80211_iter_keys(mvm->hw, vif, iter, &target);
} else {
memcpy(tk, entry->tk, sizeof(entry->tk));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 08c4898c8f1a..08546e673cf5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -863,7 +863,10 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
u32 cmd_id = REDUCE_TX_POWER_CMD;
- struct iwl_dev_tx_power_cmd cmd = {
+ struct iwl_dev_tx_power_cmd_v3_v8 cmd = {
+ .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
+ };
+ struct iwl_dev_tx_power_cmd cmd_v9_v10 = {
.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
__le16 *per_chain;
@@ -871,8 +874,19 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
u16 len = 0;
u32 n_subbands;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
+ void *cmd_data = &cmd;
- if (cmd_ver >= 7) {
+ if (cmd_ver == 10) {
+ len = sizeof(cmd_v9_v10.v10);
+ n_subbands = IWL_NUM_SUB_BANDS_V2;
+ per_chain = &cmd_v9_v10.v10.per_chain[0][0][0];
+ cmd_v9_v10.v10.flags =
+ cpu_to_le32(mvm->fwrt.reduced_power_flags);
+ } else if (cmd_ver == 9) {
+ len = sizeof(cmd_v9_v10.v9);
+ n_subbands = IWL_NUM_SUB_BANDS_V1;
+ per_chain = &cmd_v9_v10.v9.per_chain[0][0];
+ } else if (cmd_ver >= 7) {
len = sizeof(cmd.v7);
n_subbands = IWL_NUM_SUB_BANDS_V2;
per_chain = cmd.v7.per_chain[0][0];
@@ -899,9 +913,14 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
per_chain = cmd.v3.per_chain[0][0];
}
- /* all structs have the same common part, add it */
+ /* all structs have the same common part, add its length */
len += sizeof(cmd.common);
+ if (cmd_ver < 9)
+ len += sizeof(cmd.per_band);
+ else
+ cmd_data = &cmd_v9_v10;
+
ret = iwl_sar_fill_profile(&mvm->fwrt, per_chain,
IWL_NUM_CHAIN_TABLES,
n_subbands, prof_a, prof_b);
@@ -913,7 +932,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
iwl_mei_set_power_limit(per_chain);
IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
- return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, cmd_data);
}
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
@@ -1464,7 +1483,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
RCU_INIT_POINTER(mvm->fw_id_to_link_sta[i], NULL);
}
- for (i = 0; i < IWL_MVM_FW_MAX_LINK_ID + 1; i++)
+ for (i = 0; i < IWL_FW_MAX_LINK_ID + 1; i++)
RCU_INIT_POINTER(mvm->link_id_to_link_conf[i], NULL);
mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index a9929aa49913..2b0652168002 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -17,7 +17,8 @@
HOW(EXIT_COEX) \
HOW(EXIT_BANDWIDTH) \
HOW(EXIT_CSA) \
- HOW(EXIT_LINK_USAGE)
+ HOW(EXIT_LINK_USAGE) \
+ HOW(EXIT_FAIL_ENTRY)
static const char *const iwl_mvm_esr_states_names[] = {
#define NAME_ENTRY(x) [ilog2(IWL_MVM_ESR_##x)] = #x,
@@ -233,10 +234,15 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
WARN_ON_ONCE(active == link_info->active);
/* When deactivating a link session protection should
- * be stopped
+ * be stopped. Also let the firmware know if we can't Tx.
*/
- if (!active && vif->type == NL80211_IFTYPE_STATION)
+ if (!active && vif->type == NL80211_IFTYPE_STATION) {
iwl_mvm_stop_session_protection(mvm, vif);
+ if (link_info->csa_block_tx) {
+ cmd.block_tx = 1;
+ link_info->csa_block_tx = false;
+ }
+ }
}
cmd.link_id = cpu_to_le32(link_info->fw_link_id);
@@ -258,7 +264,7 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid)
memcpy(cmd.ibss_bssid_addr, link_conf->bssid, ETH_ALEN);
- iwl_mvm_set_fw_basic_rates(mvm, vif, link_conf,
+ iwl_mvm_set_fw_basic_rates(mvm, vif, link_info,
&cmd.cck_rates, &cmd.ofdm_rates);
cmd.cck_short_preamble = cpu_to_le32(link_conf->use_short_preamble);
@@ -293,6 +299,17 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
(link_conf->uora_ocw_range >> 3) & 0x7;
}
+ /* ap_sta may be NULL if we're disconnecting */
+ if (changes & LINK_CONTEXT_MODIFY_HE_PARAMS && mvmvif->ap_sta) {
+ struct ieee80211_link_sta *link_sta =
+ link_sta_dereference_check(mvmvif->ap_sta, link_id);
+
+ if (!WARN_ON(!link_sta) && link_sta->he_cap.has_he &&
+ link_sta->he_cap.he_cap_elem.mac_cap_info[5] &
+ IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX)
+ cmd.ul_mu_data_disable = 1;
+ }
+
/* TODO how to set ndp_fdbk_buff_th_exp? */
if (iwl_mvm_set_fw_mu_edca_params(mvm, mvmvif->link[link_id],
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index dfcc96f18b4f..a7a10e716e65 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -413,19 +413,18 @@ static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
}
void iwl_mvm_set_fw_basic_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *link_conf,
+ struct iwl_mvm_vif_link_info *link_info,
__le32 *cck_rates, __le32 *ofdm_rates)
{
- struct ieee80211_chanctx_conf *chanctx;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
u8 cck_ack_rates = 0, ofdm_ack_rates = 0;
+ enum nl80211_band band = NL80211_BAND_2GHZ;
- rcu_read_lock();
- chanctx = rcu_dereference(link_conf->chanctx_conf);
- iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band
- : NL80211_BAND_2GHZ,
- &cck_ack_rates, &ofdm_ack_rates);
+ phy_ctxt = link_info->phy_ctxt;
+ if (phy_ctxt && phy_ctxt->channel)
+ band = phy_ctxt->channel->band;
- rcu_read_unlock();
+ iwl_mvm_ack_rates(mvm, vif, band, &cck_ack_rates, &ofdm_ack_rates);
*cck_rates = cpu_to_le32((u32)cck_ack_rates);
*ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates);
@@ -563,7 +562,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
else
eth_broadcast_addr(cmd->bssid_addr);
- iwl_mvm_set_fw_basic_rates(mvm, vif, &vif->bss_conf, &cmd->cck_rates,
+ iwl_mvm_set_fw_basic_rates(mvm, vif, &mvmvif->deflink, &cmd->cck_rates,
&cmd->ofdm_rates);
cmd->cck_short_preamble =
@@ -1528,7 +1527,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
if (!iwl_mvm_is_short_beacon_notif_supported(mvm)) {
- struct iwl_mvm_tx_resp *beacon_notify_hdr =
+ struct iwl_tx_resp *beacon_notify_hdr =
&beacon_v5->beacon_notify_hdr;
if (unlikely(pkt_len < sizeof(*beacon_v5)))
@@ -1586,11 +1585,11 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
}
}
-void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
+static void
+iwl_mvm_handle_missed_beacons_notif(struct iwl_mvm *mvm,
+ const struct iwl_missed_beacons_notif *mb,
+ struct iwl_rx_packet *pkt)
{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig;
struct iwl_fw_dbg_trigger_tlv *trigger;
u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx;
@@ -1604,6 +1603,16 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
MISSED_BEACONS_NOTIFICATION,
0);
+ u8 new_notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ MISSED_BEACONS_NOTIF, 0);
+
+ /* If the firmware uses the new notification (from MAC_CONF_GROUP),
+ * refer to that notification's version.
+ * Note that the new notification from MAC_CONF_GROUP starts from
+ * version 5.
+ */
+ if (new_notif_ver)
+ notif_ver = new_notif_ver;
/* before version four the ID in the notification refers to mac ID */
if (notif_ver < 4) {
@@ -1620,13 +1629,11 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
}
IWL_DEBUG_INFO(mvm,
- "missed bcn %s_id=%u, consecutive=%u (%u, %u, %u)\n",
+ "missed bcn %s_id=%u, consecutive=%u (%u)\n",
notif_ver < 4 ? "mac" : "link",
id,
le32_to_cpu(mb->consec_missed_beacons),
- le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
- le32_to_cpu(mb->num_recvd_beacons),
- le32_to_cpu(mb->num_expected_beacons));
+ le32_to_cpu(mb->consec_missed_beacons_since_last_rx));
if (!vif)
return;
@@ -1656,10 +1663,27 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
"missed_beacons:%d, missed_beacons_since_rx:%d\n",
rx_missed_bcon, rx_missed_bcon_since_rx);
}
- } else if (rx_missed_bcon >= IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH &&
- link_id >= 0 && hweight16(vif->active_links) > 1) {
- iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_MISSED_BEACON,
- iwl_mvm_get_other_link(vif, link_id));
+ } else if (link_id >= 0 && hweight16(vif->active_links) > 1) {
+ u32 scnd_lnk_bcn_lost = 0;
+
+ if (notif_ver >= 5 &&
+ !IWL_FW_CHECK(mvm,
+ le32_to_cpu(mb->other_link_id) == IWL_MVM_FW_LINK_ID_INVALID,
+ "No data for other link id but we are in EMLSR active_links: 0x%x\n",
+ vif->active_links))
+ scnd_lnk_bcn_lost =
+ le32_to_cpu(mb->consec_missed_beacons_other_link);
+
+ /* Exit EMLSR if we lost more than
+ * IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS beacons on both links
+ * OR more than IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH on any link.
+ */
+ if ((rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS &&
+ scnd_lnk_bcn_lost >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS) ||
+ rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH)
+ iwl_mvm_exit_esr(mvm, vif,
+ IWL_MVM_ESR_EXIT_MISSED_BEACON,
+ iwl_mvm_get_primary_link(vif));
} else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) {
if (!iwl_mvm_has_new_tx_api(mvm))
ieee80211_beacon_loss(vif);
@@ -1687,6 +1711,31 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
}
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ iwl_mvm_handle_missed_beacons_notif(mvm, (const void *)pkt->data, pkt);
+}
+
+void iwl_mvm_rx_missed_beacons_notif_legacy(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ const struct iwl_missed_beacons_notif_v4 *mb_v4 =
+ (const void *)pkt->data;
+ struct iwl_missed_beacons_notif mb = {
+ .link_id = mb_v4->link_id,
+ .consec_missed_beacons = mb_v4->consec_missed_beacons,
+ .consec_missed_beacons_since_last_rx =
+ mb_v4->consec_missed_beacons_since_last_rx,
+ .other_link_id = cpu_to_le32(IWL_MVM_FW_LINK_ID_INVALID),
+ };
+
+ iwl_mvm_handle_missed_beacons_notif(mvm, &mb, pkt);
+}
+
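
With the constants added to constants.h earlier in this patch (IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS = 5, IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH = 11), the exit decision reduces to a small predicate; a sketch with worked cases:

/* Sketch of the EMLSR-exit predicate from the hunk above.
 *   rx_missed = 6,  other = 5 -> exit (both links past 5)
 *   rx_missed = 11, other = 0 -> exit (one link past 11)
 *   rx_missed = 8,  other = 2 -> stay in EMLSR
 */
static bool example_should_exit_emlsr(u32 rx_missed, u32 other_link_missed)
{
	return (rx_missed >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS &&
		other_link_missed >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS) ||
	       rx_missed >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH;
}
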
void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 835a05b91833..a327893c6dce 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -165,12 +165,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
mvm->lar_regdom_set = true;
mvm->mcc_src = src_id;
- /* Some kind of regulatory mess means we need to currently disallow
- * puncturing in the US and Canada. Do that here, at least until we
- * figure out the new chanctx APIs for puncturing.
- */
- if (resp->mcc == cpu_to_le16(IWL_MCC_US) ||
- resp->mcc == cpu_to_le16(IWL_MCC_CANADA))
+ if (!iwl_puncturing_is_allowed_in_bios(mvm->bios_enable_puncturing,
+ le16_to_cpu(resp->mcc)))
ieee80211_hw_set(mvm->hw, DISALLOW_PUNCTURING);
else
__clear_bit(IEEE80211_HW_DISALLOW_PUNCTURING, mvm->hw->flags);
@@ -639,10 +635,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
NL80211_FEATURE_LOW_PRIORITY_SCAN |
NL80211_FEATURE_P2P_GO_OPPPS |
NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
- NL80211_FEATURE_DYNAMIC_SMPS |
- NL80211_FEATURE_STATIC_SMPS |
NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
+ /* when the firmware supports RLC/SMPS offload, do not set these
+ * driver features, since SMPS is no longer handled by the driver.
+ */
+ if (!iwl_mvm_has_rlc_offload(mvm))
+ hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS |
+ NL80211_FEATURE_DYNAMIC_SMPS;
+
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
@@ -838,20 +839,10 @@ void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
if (ieee80211_is_mgmt(hdr->frame_control))
sta = NULL;
- /* If there is no sta, and it's not offchannel - send through AP */
+ /* this shouldn't even happen: just drop */
if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION &&
- !offchannel) {
- struct iwl_mvm_vif *mvmvif =
- iwl_mvm_vif_from_mac80211(info->control.vif);
- u8 ap_sta_id = READ_ONCE(mvmvif->deflink.ap_sta_id);
-
- if (ap_sta_id < mvm->fw->ucode_capa.num_stations) {
- /* mac80211 holds rcu read lock */
- sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
- if (IS_ERR_OR_NULL(sta))
- goto drop;
- }
- }
+ !offchannel)
+ goto drop;
if (tmp_sta && !sta && link_id != IEEE80211_LINK_UNSPECIFIED &&
!ieee80211_is_probe_resp(hdr->frame_control)) {
@@ -1241,7 +1232,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
mvm->nvm_data = NULL;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/* fast_resume will be cleared by iwl_mvm_fast_resume */
fast_resume = mvm->fast_resume;
@@ -1263,7 +1254,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
}
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
/*
@@ -1480,19 +1471,33 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
u32 cmd_id = REDUCE_TX_POWER_CMD;
int len;
- struct iwl_dev_tx_power_cmd cmd = {
+ struct iwl_dev_tx_power_cmd_v3_v8 cmd = {
.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
.common.mac_context_id =
cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
- .common.pwr_restriction = cpu_to_le16(8 * tx_power),
};
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
- IWL_FW_CMD_VER_UNKNOWN);
-
- if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
- cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
-
- if (cmd_ver == 8)
+ struct iwl_dev_tx_power_cmd cmd_v9_v10;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
+ u16 u_tx_power = tx_power == IWL_DEFAULT_MAX_TX_POWER ?
+ IWL_DEV_MAX_TX_POWER : 8 * tx_power;
+ void *cmd_data = &cmd;
+
+ cmd.common.pwr_restriction = cpu_to_le16(u_tx_power);
+
+ if (cmd_ver > 8) {
+ /* Those fields sit in the same place for v9 and v10 */
+ cmd_v9_v10.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC);
+ cmd_v9_v10.common.mac_context_id =
+ cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id);
+ cmd_v9_v10.common.pwr_restriction = cpu_to_le16(u_tx_power);
+ cmd_data = &cmd_v9_v10;
+ }
+
+ if (cmd_ver == 10)
+ len = sizeof(cmd_v9_v10.v10);
+ else if (cmd_ver == 9)
+ len = sizeof(cmd_v9_v10.v9);
+ else if (cmd_ver == 8)
len = sizeof(cmd.v8);
else if (cmd_ver == 7)
len = sizeof(cmd.v7);
@@ -1507,10 +1512,14 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
else
len = sizeof(cmd.v3);
- /* all structs have the same common part, add it */
+ /* all structs have the same common part, add its length */
len += sizeof(cmd.common);
- return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
+ if (cmd_ver < 9)
+ len += sizeof(cmd.per_band);
+
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, cmd_data);
}
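
For readers tracking the payload math, the length sent above decomposes as follows; a reading aid, not new logic:

/* len  = sizeof(chosen version struct)    v3..v8 from cmd, or v9/v10
 * len += sizeof(cmd.common)               header shared by all versions
 * len += sizeof(cmd.per_band)             only for cmd_ver < 9
 * with cmd_data pointing at &cmd for v3..v8 or &cmd_v9_v10 for v9/v10,
 * the same pattern used by iwl_mvm_sar_select_profile() in fw.c above */
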
static void iwl_mvm_post_csa_tx(void *data, struct ieee80211_sta *sta)
@@ -5818,6 +5827,10 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
int i;
if (!iwl_mvm_has_new_tx_api(mvm)) {
+ /* we can't ask the firmware anything if it is dead */
+ if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status))
+ return;
if (drop) {
guard(mvm)(mvm);
iwl_mvm_flush_tx_path(mvm,
@@ -5911,8 +5924,11 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/* this can take a while, and we may need/want other operations
* to succeed while doing this, so do it without the mutex held
+ * If the firmware is dead, this can't work...
*/
- if (!drop && !iwl_mvm_has_new_tx_api(mvm))
+ if (!drop && !iwl_mvm_has_new_tx_api(mvm) &&
+ !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status))
iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
index 8a38fc4b0b0f..455f5f417506 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
@@ -144,7 +144,7 @@ static void iwl_mvm_mld_update_sta_key(struct ieee80211_hw *hw,
if (sta != data->sta || key->link_id >= 0)
return;
- err = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, sizeof(cmd), &cmd);
+ err = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
if (err)
data->err = err;
@@ -162,8 +162,8 @@ int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm,
.new_sta_mask = new_sta_mask,
};
- ieee80211_iter_keys_rcu(mvm->hw, vif, iwl_mvm_mld_update_sta_key,
- &data);
+ ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_mld_update_sta_key,
+ &data);
return data.err;
}
@@ -402,7 +402,7 @@ void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm,
if (!sec_key_ver)
return;
- ieee80211_iter_keys_rcu(mvm->hw, vif,
- iwl_mvm_sec_key_remove_ap_iter,
- (void *)(uintptr_t)link_id);
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_sec_key_remove_ap_iter,
+ (void *)(uintptr_t)link_id);
}
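
The two changes in this file go together, and the link is the iteration context; a short note based on the mac80211 contracts for these iterators:

/* ieee80211_iter_keys_rcu() runs the callback in atomic (RCU) context,
 * so the key-update command previously had to be fired with CMD_ASYNC
 * and errors could not be awaited. ieee80211_iter_keys() requires the
 * wiphy lock but may sleep, which lets the callback send the command
 * synchronously (flags = 0) and propagate failures through data->err */
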
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 3c99396ad369..f2378e0fb2fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -223,7 +223,7 @@ static void iwl_mvm_restart_mpdu_count(struct iwl_mvm *mvm,
spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
}
- IWL_DEBUG_STATS(mvm, "MPDU counters are cleared\n");
+ IWL_DEBUG_INFO(mvm, "MPDU counters are cleared\n");
}
static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
@@ -269,6 +269,9 @@ static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
*/
iwl_mvm_restart_mpdu_count(mvm, mvmvif);
+ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_ESR_LINK_UP,
+ NULL);
+
return ret;
}
@@ -456,6 +459,9 @@ static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm,
/* Start a new counting window */
iwl_mvm_restart_mpdu_count(mvm, mvmvif);
+ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_ESR_LINK_DOWN,
+ NULL);
+
return ret;
}
@@ -1335,6 +1341,22 @@ iwl_mvm_mld_mac_pre_channel_switch(struct ieee80211_hw *hw,
else
selected = primary;
+ /*
+ * Remember to tell the firmware that this link can't Tx.
+ * Note that this logic seems to be unrelated to esr, but it
+ * really is needed only when esr is active. When we have a
+ * single link, the firmware will handle all this on its own.
+ * In multi-link scenarios, we can learn about the CSA from
+ * another link and this logic is too complex for the firmware
+ * to track.
+ * Since we want to de-activate the link that got a CSA, we
+ * need to tell the firmware not to send any frame on that link
+ * as the firmware may not be aware that the link is under a CSA
+ * with mode=1 (no Tx allowed).
+ */
+ if (chsw->block_tx && mvmvif->link[chsw->link_id])
+ mvmvif->link[chsw->link_id]->csa_block_tx = true;
+
iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_CSA, selected);
mutex_unlock(&mvm->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index d5a204e52076..28a9d90ad1cd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -46,7 +46,7 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
static int iwl_mvm_mld_send_sta_cmd(struct iwl_mvm *mvm,
- struct iwl_mvm_sta_cfg_cmd *cmd)
+ struct iwl_sta_cfg_cmd *cmd)
{
int ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD),
@@ -63,7 +63,7 @@ static int iwl_mvm_mld_add_int_sta_to_fw(struct iwl_mvm *mvm,
struct iwl_mvm_int_sta *sta,
const u8 *addr, int link_id)
{
- struct iwl_mvm_sta_cfg_cmd cmd;
+ struct iwl_sta_cfg_cmd cmd;
lockdep_assert_held(&mvm->mutex);
@@ -94,7 +94,7 @@ static int iwl_mvm_mld_add_int_sta_to_fw(struct iwl_mvm *mvm,
*/
static int iwl_mvm_mld_rm_sta_from_fw(struct iwl_mvm *mvm, u32 sta_id)
{
- struct iwl_mvm_remove_sta_cmd rm_sta_cmd = {
+ struct iwl_remove_sta_cmd rm_sta_cmd = {
.sta_id = cpu_to_le32(sta_id),
};
int ret;
@@ -216,7 +216,7 @@ int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
const u8 *baddr = _baddr;
unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ iwl_mvm_get_wd_timeout(mvm, vif);
u16 *queue;
lockdep_assert_held(&mvm->mutex);
@@ -254,7 +254,7 @@ int iwl_mvm_mld_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *msta = &mvm_link->mcast_sta;
static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
const u8 *maddr = _maddr;
- unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif);
lockdep_assert_held(&mvm->mutex);
@@ -438,7 +438,7 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_vif_link_info *link_info =
mvm_vif->link[link_conf->link_id];
- struct iwl_mvm_sta_cfg_cmd cmd = {
+ struct iwl_sta_cfg_cmd cmd = {
.sta_id = cpu_to_le32(mvm_link_sta->sta_id),
.station_type = cpu_to_le32(mvm_sta->sta_type),
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 22f48b66d79c..ef07cff203b0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -299,6 +299,7 @@ struct iwl_probe_resp_data {
* @active: indicates the link is active in FW (for sanity checking)
* @cab_queue: content-after-beacon (multicast) queue
* @listen_lmac: indicates this link is allocated to the listen LMAC
+ * @csa_block_tx: a CSA with mode=1 (no Tx allowed) was received on this link
* @mcast_sta: multicast station
* @phy_ctxt: phy context allocated to this link, if any
* @bf_data: beacon filtering data
@@ -324,6 +325,7 @@ struct iwl_mvm_vif_link_info {
bool he_ru_2mhz_block;
bool active;
bool listen_lmac;
+ bool csa_block_tx;
u16 cab_queue;
/* Assigned while mac80211 has the link in a channel context,
@@ -368,6 +370,7 @@ struct iwl_mvm_vif_link_info {
* preventing the enablement of EMLSR
* @IWL_MVM_ESR_EXIT_CSA: CSA happened, so exit EMLSR
* @IWL_MVM_ESR_EXIT_LINK_USAGE: Exit EMLSR due to low tpt on secondary link
+ * @IWL_MVM_ESR_EXIT_FAIL_ENTRY: Exit EMLSR due to entry failure
*/
enum iwl_mvm_esr_state {
IWL_MVM_ESR_BLOCKED_PREVENTION = 0x1,
@@ -382,6 +385,7 @@ enum iwl_mvm_esr_state {
IWL_MVM_ESR_EXIT_BANDWIDTH = 0x80000,
IWL_MVM_ESR_EXIT_CSA = 0x100000,
IWL_MVM_ESR_EXIT_LINK_USAGE = 0x200000,
+ IWL_MVM_ESR_EXIT_FAIL_ENTRY = 0x400000,
};
#define IWL_MVM_BLOCK_ESR_REASONS 0xffff
@@ -508,7 +512,7 @@ struct iwl_mvm_vif {
bool bf_enabled;
bool ba_enabled;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/* WoWLAN GTK rekey data */
struct {
u8 kck[NL80211_KCK_EXT_LEN];
@@ -770,7 +774,6 @@ struct iwl_mvm_tcm {
* @num_stored: number of mpdus stored in the buffer
* @queue: queue of this reorder buffer
* @last_amsdu: track last ASMDU SN for duplication detection
- * @last_sub_index: track ASMDU sub frame index for duplication detection
* @valid: reordering is valid for this queue
* @lock: protect reorder buffer internal state
*/
@@ -779,7 +782,6 @@ struct iwl_mvm_reorder_buffer {
u16 num_stored;
int queue;
u16 last_amsdu;
- u8 last_sub_index;
bool valid;
spinlock_t lock;
} ____cacheline_aligned_in_smp;
@@ -1074,8 +1076,8 @@ struct iwl_mvm {
/* data related to data path */
struct iwl_rx_phy_info last_phy_info;
- struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT_MAX];
- struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_MVM_STATION_COUNT_MAX];
+ struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_STATION_COUNT_MAX];
+ struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_STATION_COUNT_MAX];
u8 rx_ba_sessions;
/* configured by mac80211 */
@@ -1164,7 +1166,7 @@ struct iwl_mvm {
struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER];
- struct ieee80211_bss_conf __rcu *link_id_to_link_conf[IWL_MVM_FW_MAX_LINK_ID + 1];
+ struct ieee80211_bss_conf __rcu *link_id_to_link_conf[IWL_FW_MAX_LINK_ID + 1];
/* -1 for always, 0 for never, >0 for that many times */
s8 fw_restart;
@@ -1176,7 +1178,7 @@ struct iwl_mvm {
struct ieee80211_vif *p2p_device_vif;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
struct wiphy_wowlan_support wowlan;
int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
@@ -1200,8 +1202,11 @@ struct iwl_mvm {
wait_queue_head_t rx_sync_waitq;
- /* BT-Coex */
- struct iwl_bt_coex_profile_notif last_bt_notif;
+ /* BT-Coex - only one of those will be used */
+ union {
+ struct iwl_bt_coex_prof_old_notif last_bt_notif;
+ struct iwl_bt_coex_profile_notif last_bt_wifi_loss;
+ };
struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
u8 bt_tx_prio;
@@ -1365,6 +1370,7 @@ struct iwl_mvm {
struct iwl_mvm_acs_survey *acs_survey;
bool statistics_clear;
+ u32 bios_enable_puncturing;
};
/* Extract MVM priv from op_mode and _hw */
@@ -1700,9 +1706,9 @@ static inline struct agg_tx_status *
iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp)
{
if (iwl_mvm_has_new_tx_api(mvm))
- return &((struct iwl_mvm_tx_resp *)tx_resp)->status;
+ return &((struct iwl_tx_resp *)tx_resp)->status;
else
- return ((struct iwl_mvm_tx_resp_v3 *)tx_resp)->status;
+ return ((struct iwl_tx_resp_v3 *)tx_resp)->status;
}
static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
@@ -1745,7 +1751,7 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
if (iwl_mvm_is_esr_supported(trans) ||
(CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
CSR_HW_RFID_IS_CDB(trans->hw_rf_id)))
- return IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM;
+ return IWL_FW_MAX_ACTIVE_LINKS_NUM;
return 1;
}
@@ -1764,6 +1770,13 @@ static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm,
return iwl_mvm_ac_to_tx_fifo[ac];
}
+static inline bool iwl_mvm_has_rlc_offload(struct iwl_mvm *mvm)
+{
+ return iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD),
+ 0) >= 3;
+}
+
struct iwl_rate_info {
u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
@@ -2003,7 +2016,7 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_set_fw_basic_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *link_conf,
+ struct iwl_mvm_vif_link_info *link_info,
__le32 *cck_rates, __le32 *ofdm_rates);
void iwl_mvm_set_fw_protection_flags(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -2062,6 +2075,8 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_missed_beacons_notif_legacy(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
@@ -2291,7 +2306,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int idx);
extern const struct file_operations iwl_dbgfs_d3_test_ops;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_fast_suspend(struct iwl_mvm *mvm);
@@ -2322,6 +2337,8 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
/* BT Coex */
int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm);
+void iwl_mvm_rx_bt_coex_old_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -2570,8 +2587,7 @@ u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed);
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool tdls, bool cmd_q);
+ struct ieee80211_vif *vif);
void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
const char *errmsg);
void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 836ca22597bc..80ec59c58ae4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -611,6 +611,7 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
char mcc[3];
struct ieee80211_regdomain *regd;
int wgds_tbl_idx;
+ bool changed = false;
lockdep_assert_held(&mvm->mutex);
@@ -630,10 +631,15 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
IWL_DEBUG_LAR(mvm,
"RX: received chub update mcc cmd (mcc '%s' src %d)\n",
mcc, src);
- regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
+ regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, &changed);
if (IS_ERR_OR_NULL(regd))
return;
+ if (!changed) {
+ IWL_DEBUG_LAR(mvm, "RX: No change in the regulatory data\n");
+ goto out;
+ }
+
wgds_tbl_idx = iwl_mvm_get_sar_geo_profile(mvm);
if (wgds_tbl_idx < 1)
IWL_DEBUG_INFO(mvm,
@@ -644,5 +650,7 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
wgds_tbl_idx);
regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
+
+out:
kfree(regd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index b7dcae76a05d..4dd4a9d5c71f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -159,6 +159,43 @@ static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm,
iwl_mvm_get_primary_link(vif));
}
+static void iwl_mvm_rx_esr_trans_fail_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_esr_trans_fail_notif *notif = (void *)pkt->data;
+ struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
+ u8 fw_link_id = le32_to_cpu(notif->link_id);
+ struct ieee80211_bss_conf *bss_conf;
+
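+ /* no BSS vif (e.g. during interface teardown): nothing to act on */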
+ if (IS_ERR_OR_NULL(vif))
+ return;
+
+ IWL_DEBUG_INFO(mvm, "Failed to %s eSR on link %d, reason %d\n",
+ le32_to_cpu(notif->activation) ? "enter" : "exit",
+ le32_to_cpu(notif->link_id),
+ le32_to_cpu(notif->err_code));
+
+ /* we couldn't go back to a single link, so disconnect */
+ if (!le32_to_cpu(notif->activation)) {
+ iwl_mvm_connection_loss(mvm, vif, "emlsr exit failed");
+ return;
+ }
+
+ bss_conf = iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, fw_link_id, false);
+ if (IWL_FW_CHECK(mvm, !bss_conf,
+ "FW reported failure to activate EMLSR on a non-existing link: %d\n",
+ fw_link_id))
+ return;
+
+ /*
+ * We failed to activate the second link and enter EMLSR; go
+ * back to a single link.
+ */
+ iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_FAIL_ENTRY,
+ bss_conf->link_id);
+}
+
static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
@@ -261,6 +298,12 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_thermal_dual_chain_request *req = (void *)pkt->data;
+ /* firmware is expected to handle that in RLC offload mode */
+ if (IWL_FW_CHECK(mvm, iwl_mvm_has_rlc_offload(mvm),
+ "Got THERMAL_DUAL_CHAIN_REQUEST (0x%x) in RLC offload mode\n",
+ req->event))
+ return;
+
/*
* We could pass it to the iterator data, but also need to remember
* it for new interfaces that are added while in this state.
@@ -325,7 +368,7 @@ struct iwl_rx_handlers {
*/
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC,
- struct iwl_mvm_tx_resp),
+ struct iwl_tx_resp),
RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC,
struct iwl_mvm_ba_notif),
@@ -333,9 +376,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC,
struct iwl_tlc_update_notif),
- RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
+ RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_old_notif,
RX_HANDLER_ASYNC_LOCKED_WIPHY,
- struct iwl_bt_coex_profile_notif),
+ struct iwl_bt_coex_prof_old_notif),
+ RX_HANDLER_GRP(BT_COEX_GROUP, PROFILE_NOTIF, iwl_mvm_rx_bt_coex_notif,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_bt_coex_profile_notif),
RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
@@ -385,10 +431,15 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC,
struct iwl_umac_scan_iter_complete_notif),
- RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
+ RX_HANDLER(MISSED_BEACONS_NOTIFICATION,
+ iwl_mvm_rx_missed_beacons_notif_legacy,
RX_HANDLER_ASYNC_LOCKED_WIPHY,
- struct iwl_missed_beacons_notif),
+ struct iwl_missed_beacons_notif_v4),
+ RX_HANDLER_GRP(MAC_CONF_GROUP, MISSED_BEACONS_NOTIF,
+ iwl_mvm_rx_missed_beacons_notif,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_missed_beacons_notif),
RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC,
struct iwl_error_resp),
RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
@@ -472,6 +523,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_GRP(SCAN_GROUP, CHANNEL_SURVEY_NOTIF,
iwl_mvm_rx_channel_survey_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_umac_scan_channel_survey_notif),
+ RX_HANDLER_GRP(MAC_CONF_GROUP, EMLSR_TRANS_FAIL_NOTIF,
+ iwl_mvm_rx_esr_trans_fail_notif,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_esr_trans_fail_notif),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
@@ -602,6 +657,7 @@ static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
HCMD_NAME(STA_REMOVE_CMD),
HCMD_NAME(STA_DISABLE_TX_CMD),
HCMD_NAME(ROC_CMD),
+ HCMD_NAME(EMLSR_TRANS_FAIL_NOTIF),
HCMD_NAME(ROC_NOTIF),
HCMD_NAME(CHANNEL_SWITCH_ERROR_NOTIF),
HCMD_NAME(MISSED_VAP_NOTIF),
@@ -713,6 +769,13 @@ static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
HCMD_NAME(TAS_CONFIG),
};
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_bt_coex_names[] = {
+ HCMD_NAME(PROFILE_NOTIF),
+};
+
static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
@@ -722,6 +785,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
[SCAN_GROUP] = HCMD_ARR(iwl_mvm_scan_names),
[LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names),
+ [BT_COEX_GROUP] = HCMD_ARR(iwl_mvm_bt_coex_names),
[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
[REGULATORY_AND_NVM_GROUP] =
HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
@@ -1198,10 +1262,12 @@ static void iwl_mvm_trig_link_selection(struct wiphy *wiphy,
struct iwl_mvm *mvm =
container_of(wk, struct iwl_mvm, trig_link_selection_wk);
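+ /* link selection iterates vifs and touches mvm state, so hold the mutex */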
+ mutex_lock(&mvm->mutex);
ieee80211_iterate_active_interfaces(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_find_link_selection_vif,
NULL);
+ mutex_unlock(&mvm->mutex);
}
static struct iwl_op_mode *
@@ -1221,12 +1287,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
/*
- * We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station
+ * We use IWL_STATION_COUNT_MAX to check the validity of the station
* index all over the driver - check that its value corresponds to the
* array size.
*/
BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) !=
- IWL_MVM_STATION_COUNT_MAX);
+ IWL_STATION_COUNT_MAX);
/********************************
* 1. Allocating and configuring HW data
@@ -1285,6 +1351,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
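+ /* cache the BIOS/UEFI preference for puncturing */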
+ mvm->bios_enable_puncturing = iwl_uefi_get_puncturing(&mvm->fwrt);
if (iwl_mvm_has_new_tx_api(mvm)) {
/*
@@ -1384,10 +1451,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
driver_data[2]);
- /* Set a short watchdog for the command queue */
- trans_cfg.cmd_q_wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, NULL, false, true);
-
snprintf(mvm->hw->wiphy->fw_version,
sizeof(mvm->hw->wiphy->fw_version),
"%.31s", fw->fw_version);
@@ -1511,6 +1574,8 @@ void iwl_mvm_stop_device(struct iwl_mvm *mvm)
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
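+ /* traffic load monitoring (TCM) is meaningless while the device is stopped */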
+ iwl_mvm_pause_tcm(mvm, false);
+
iwl_fw_dbg_stop_sync(&mvm->fwrt);
iwl_trans_stop_device(mvm->trans);
iwl_free_fw_paging(&mvm->fwrt);
@@ -2090,6 +2155,23 @@ static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode,
iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data);
}
+#ifdef CONFIG_PM_SLEEP
+static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
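+ /* power was lost during suspend: drop D3 state and stop the device cleanly */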
+ mutex_lock(&mvm->mutex);
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ iwl_mvm_stop_device(mvm);
+ mvm->fast_resume = false;
+ mutex_unlock(&mvm->mutex);
+}
+#else
+static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
+{}
+#endif
+
#define IWL_MVM_COMMON_OPS \
/* these could be differentiated */ \
.queue_full = iwl_mvm_stop_sw_queue, \
@@ -2102,7 +2184,8 @@ static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode,
/* as we only register one, these MUST be common! */ \
.start = iwl_op_mode_mvm_start, \
.stop = iwl_op_mode_mvm_stop, \
- .time_point = iwl_op_mode_mvm_time_point
+ .time_point = iwl_op_mode_mvm_time_point, \
+ .device_powered_off = iwl_op_mode_mvm_device_powered_off
static const struct iwl_op_mode_ops iwl_mvm_ops = {
IWL_MVM_COMMON_OPS,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index ce264b386029..7cab5373c8ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -159,7 +159,11 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
.phy_id = cpu_to_le32(ctxt->id),
};
- if (ctxt->rlc_disabled)
+ /* From version 3 on, RLC is offloaded to firmware, so the driver
+ * no longer needs to send cmd.rlc. We don't use any other field of
+ * the command either, so don't send it at all.
+ */
+ if (iwl_mvm_has_rlc_offload(mvm) || ctxt->rlc_disabled)
return 0;
if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 151289e13308..1a0b5f8d4339 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -4,7 +4,7 @@
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include "iwl-trans.h"
@@ -738,8 +738,8 @@ static void iwl_mvm_stats_energy_iter(void *_data,
u8 *energy = _data;
u32 sta_id = mvmsta->deflink.sta_id;
- if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT_MAX, "sta_id %d >= %d",
- sta_id, IWL_MVM_STATION_COUNT_MAX))
+ if (WARN_ONCE(sta_id >= IWL_STATION_COUNT_MAX, "sta_id %d >= %d",
+ sta_id, IWL_STATION_COUNT_MAX))
return;
if (energy[sta_id])
@@ -991,7 +991,7 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
spin_lock_bh(&mvmsta->mpdu_counters[q].lock);
/* The link IDs that don't exist will contain 0 */
- for (int link = 0; link < IWL_MVM_FW_MAX_LINK_ID; link++) {
+ for (int link = 0; link < IWL_FW_MAX_LINK_ID; link++) {
total_tx += mvmsta->mpdu_counters[q].per_link[link].tx;
total_rx += mvmsta->mpdu_counters[q].per_link[link].rx;
}
@@ -1009,8 +1009,8 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
}
- IWL_DEBUG_STATS(mvm, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
- total_tx, total_rx);
+ IWL_DEBUG_INFO(mvm, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
+ total_tx, total_rx);
/* If we don't have enough MPDUs - exit EMLSR */
if (total_tx < IWL_MVM_ENTER_ESR_TPT_THRESH &&
@@ -1020,6 +1020,9 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
return;
}
+ IWL_DEBUG_INFO(mvm, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
+ sec_link, sec_link_tx, sec_link_rx);
+
/* Calculate the percentage of the secondary link TX/RX */
sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;
@@ -1039,7 +1042,7 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
- u8 average_energy[IWL_MVM_STATION_COUNT_MAX];
+ u8 average_energy[IWL_STATION_COUNT_MAX];
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_system_statistics_notif_oper *stats;
int i;
@@ -1098,7 +1101,7 @@ static void
iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
- u8 average_energy[IWL_MVM_STATION_COUNT_MAX];
+ u8 average_energy[IWL_STATION_COUNT_MAX];
__le32 air_time[MAC_INDEX_AUX];
__le32 rx_bytes[MAC_INDEX_AUX];
__le32 flags = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 1a210d0c22b3..65f8933c34b4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -729,8 +729,6 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
bool last_subframe =
desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
u8 tid = ieee80211_get_tid(hdr);
- u8 sub_frame_idx = desc->amsdu_info &
- IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
struct iwl_mvm_reorder_buf_entry *entries;
u32 sta_mask;
int index;
@@ -843,10 +841,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
__skb_queue_tail(&entries[index].frames, skb);
buffer->num_stored++;
- if (amsdu) {
+ if (amsdu)
buffer->last_amsdu = sn;
- buffer->last_sub_index = sub_frame_idx;
- }
/*
* We cannot trust NSSN for AMSDU sub-frames that are not the last.
@@ -2542,7 +2538,7 @@ void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
goto out;
}
- if (WARN(tid != baid_data->tid || sta_id > IWL_MVM_STATION_COUNT_MAX ||
+ if (WARN(tid != baid_data->tid || sta_id > IWL_STATION_COUNT_MAX ||
!(baid_data->sta_mask & BIT(sta_id)),
"baid 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n",
baid, baid_data->sta_mask, baid_data->tid, sta_id,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 8e0df31f1b3e..3ce9150213a7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -48,6 +48,8 @@
/* Number of iterations on the channel for mei filtered scan */
#define IWL_MEI_SCAN_NUM_ITER 5U
+#define WFA_TPC_IE_LEN 9
+
struct iwl_mvm_scan_timing_params {
u32 suspend_time;
u32 max_out_time;
@@ -303,8 +305,8 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
- /* we create the 802.11 header and SSID element */
- max_probe_len -= 24 + 2;
+ /* we create the 802.11 header, SSID element and WFA TPC element */
+ max_probe_len -= 24 + 2 + WFA_TPC_IE_LEN;
/* DS parameter set element is added on 2.4GHZ band if required */
if (iwl_mvm_rrm_scan_needed(mvm))
@@ -731,8 +733,6 @@ static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
return newpos;
}
-#define WFA_TPC_IE_LEN 9
-
static void iwl_mvm_add_tpc_report_ie(u8 *pos)
{
pos[0] = WLAN_EID_VENDOR_SPECIFIC;
@@ -837,8 +837,8 @@ static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
return ((n_ssids <= PROBE_OPTION_MAX) &&
(n_channels <= mvm->fw->ucode_capa.n_scan_channels) &&
(ies->common_ie_len +
- ies->len[NL80211_BAND_2GHZ] +
- ies->len[NL80211_BAND_5GHZ] <=
+ ies->len[NL80211_BAND_2GHZ] + ies->len[NL80211_BAND_5GHZ] +
+ ies->len[NL80211_BAND_6GHZ] <=
iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
}
@@ -1594,7 +1594,7 @@ iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
for (i = 0; i < n_channels; i++) {
channel_cfg[i].flags = cpu_to_le32(flags);
- channel_cfg[i].v1.channel_num = channels[i]->hw_value;
+ channel_cfg[i].channel_num = channels[i]->hw_value;
if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
enum nl80211_band band = channels[i]->band;
@@ -1626,13 +1626,13 @@ iwl_mvm_umac_scan_cfg_channels_v4(struct iwl_mvm *mvm,
&cp->channel_config[i];
cfg->flags = cpu_to_le32(flags);
- cfg->v2.channel_num = channels[i]->hw_value;
+ cfg->channel_num = channels[i]->hw_value;
cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band);
cfg->v2.iter_count = 1;
cfg->v2.iter_interval = 0;
iwl_mvm_scan_ch_add_n_aps_override(vif_type,
- cfg->v2.channel_num,
+ cfg->channel_num,
cfg->v2.band, bitmap,
bitmap_n_entries);
}
@@ -1656,9 +1656,20 @@ iwl_mvm_umac_scan_cfg_channels_v7(struct iwl_mvm *mvm,
u8 iwl_band = iwl_mvm_phy_band_from_nl80211(band);
cfg->flags = cpu_to_le32(flags | n_aps_flag);
- cfg->v2.channel_num = channels[i]->hw_value;
+ cfg->channel_num = channels[i]->hw_value;
if (cfg80211_channel_is_psc(channels[i]))
cfg->flags = 0;
+
+ if (band == NL80211_BAND_6GHZ) {
+ /* 6 GHz channels should only appear in a scan request
+ * that has scan_6ghz set. The only exception is MLO
+ * scan, which has to be passive.
+ */
+ WARN_ON_ONCE(cfg->flags != 0);
+ cfg->flags =
+ cpu_to_le32(IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE);
+ }
+
cfg->v2.iter_count = 1;
cfg->v2.iter_interval = 0;
if (version < 17)
@@ -1778,7 +1789,7 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
!params->n_6ghz_params && params->n_ssids)
continue;
- cfg->v1.channel_num = params->channels[i]->hw_value;
+ cfg->channel_num = params->channels[i]->hw_value;
if (version < 17)
cfg->v2.band = PHY_BAND_6;
else
@@ -2466,7 +2477,7 @@ iwl_mvm_scan_umac_fill_ch_p_v7(struct iwl_mvm *mvm,
if (!cfg80211_channel_is_psc(channel))
continue;
- cfg->v5.channel_num = channel->hw_value;
+ cfg->channel_num = channel->hw_value;
cfg->v5.iter_count = 1;
cfg->v5.iter_interval = 0;
@@ -3168,18 +3179,16 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
params.n_channels = j;
}
- if (non_psc_included &&
- !iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) {
- kfree(params.channels);
- return -ENOBUFS;
+ if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) {
+ ret = -ENOBUFS;
+ goto out;
}
uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
-
- if (non_psc_included)
- kfree(params.channels);
- if (uid < 0)
- return uid;
+ if (uid < 0) {
+ ret = uid;
+ goto out;
+ }
ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (!ret) {
@@ -3197,6 +3206,9 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
}
+out:
+ if (non_psc_included)
+ kfree(params.channels);
return ret;
}
@@ -3301,13 +3313,23 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
mvm->scan_start);
}
-static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
+static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type, bool *wait)
{
- struct iwl_umac_scan_abort cmd = {};
+ struct iwl_umac_scan_abort abort_cmd = {};
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
+ .len = { sizeof(abort_cmd), },
+ .data = { &abort_cmd, },
+ .flags = CMD_SEND_IN_RFKILL,
+ };
+
int uid, ret;
+ u32 status = IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND;
lockdep_assert_held(&mvm->mutex);
+ *wait = true;
+
/* We should always get a valid index here, because we already
* checked that this type of scan was running in the generic
* code.
@@ -3316,17 +3338,28 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
if (WARN_ON_ONCE(uid < 0))
return uid;
- cmd.uid = cpu_to_le32(uid);
+ abort_cmd.uid = cpu_to_le32(uid);
IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
- ret = iwl_mvm_send_cmd_pdu(mvm,
- WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
- CMD_SEND_IN_RFKILL, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
+
+ IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d, status=%u\n", ret, status);
if (!ret)
mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
- IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d\n", ret);
+ /* Handle the case where the FW is no longer familiar with the scan
+ * that is to be stopped: most likely the scan complete notification
+ * was already received but not yet processed. There is then no need
+ * to wait for it, and the flow should continue as if the scan had
+ * really been aborted.
+ */
+ if (status == IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND) {
+ mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+ *wait = false;
+ }
+
return ret;
}
@@ -3336,6 +3369,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
SCAN_OFFLOAD_COMPLETE, };
int ret;
+ bool wait = true;
lockdep_assert_held(&mvm->mutex);
@@ -3347,7 +3381,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
- ret = iwl_mvm_umac_scan_abort(mvm, type);
+ ret = iwl_mvm_umac_scan_abort(mvm, type, &wait);
else
ret = iwl_mvm_lmac_scan_abort(mvm);
@@ -3355,6 +3389,10 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
return ret;
+ } else if (!wait) {
+ IWL_DEBUG_SCAN(mvm, "no need to wait for scan type %d\n", type);
+ iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
+ return 0;
}
return iwl_wait_notification(&mvm->notif_wait, &wait_scan_done,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 15e64d94d6ea..b6c99cd6d9e5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -29,7 +29,7 @@ int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype)
int sta_id;
u32 reserved_ids = 0;
- BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
+ BUILD_BUG_ON(IWL_STATION_COUNT_MAX > 32);
WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
lockdep_assert_held(&mvm->mutex);
@@ -900,7 +900,7 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
struct iwl_mvm_txq *mvmtxq =
iwl_mvm_txq_from_tid(sta, tid);
unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+ iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);
int queue = -1;
lockdep_assert_held(&mvm->mutex);
@@ -1080,7 +1080,7 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
return;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
- wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+ wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);
ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
@@ -1330,7 +1330,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
.frame_limit = IWL_FRAME_LIMIT,
};
unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+ iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);
int queue = -1;
u16 queue_tmp;
unsigned long disable_agg_tids = 0;
@@ -1622,7 +1622,7 @@ void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
unsigned int wdg =
- iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
+ iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif);
int i;
struct iwl_trans_txq_scd_cfg cfg = {
.sta_id = mvm_sta->deflink.sta_id,
@@ -2359,7 +2359,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
int queue;
int ret;
unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ iwl_mvm_get_wd_timeout(mvm, vif);
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_VO,
.sta_id = mvmvif->deflink.bcast_sta.sta_id,
@@ -2568,7 +2568,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
.aggregate = false,
.frame_limit = IWL_FRAME_LIMIT,
};
- unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif);
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -3207,7 +3207,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
+ iwl_mvm_get_wd_timeout(mvm, vif);
int queue, ret;
bool alloc_queue = true;
enum iwl_mvm_queue_status queue_status;
@@ -4326,7 +4326,7 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
u16 queue;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ iwl_mvm_get_wd_timeout(mvm, vif);
bool mld = iwl_mvm_has_mld_api(mvm->fw);
u32 type = mld ? STATION_TYPE_PEER : IWL_STA_LINK;
@@ -4455,10 +4455,10 @@ void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
sizeof(queue_counter->per_link));
queue_counter->window_start = jiffies;
- IWL_DEBUG_STATS(mvm, "MPDU counters are cleared\n");
+ IWL_DEBUG_INFO(mvm, "MPDU counters are cleared\n");
}
- for (int i = 0; i < IWL_MVM_FW_MAX_LINK_ID; i++)
+ for (int i = 0; i < IWL_FW_MAX_LINK_ID; i++)
total_mpdus += tx ? queue_counter->per_link[i].tx :
queue_counter->per_link[i].rx;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 0dc83d6afb3c..4a3799ae7c18 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -12,7 +12,7 @@
#include <linux/wait.h>
#include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */
-#include "fw-api.h" /* IWL_MVM_STATION_COUNT_MAX */
+#include "fw-api.h" /* IWL_STATION_COUNT_MAX */
#include "rs.h"
struct iwl_mvm;
@@ -361,7 +361,7 @@ struct iwl_mvm_mpdu_counter {
*/
struct iwl_mvm_tpt_counter {
spinlock_t lock;
- struct iwl_mvm_mpdu_counter per_link[IWL_MVM_FW_MAX_LINK_ID];
+ struct iwl_mvm_mpdu_counter per_link[IWL_FW_MAX_LINK_ID];
unsigned long window_start;
} ____cacheline_aligned_in_smp;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index a8c42ce3b630..72fa7ac86516 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -114,16 +114,14 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id,
mvm->aux_sta.tfd_queue_msk);
- if (mvm->mld_api_is_used) {
- iwl_mvm_mld_rm_aux_sta(mvm);
- mutex_unlock(&mvm->mutex);
- return;
- }
-
/* In newer version of this command an aux station is added only
* in cases of a dedicated tx queue and needs to be removed at end
- * of use */
- if (iwl_mvm_has_new_station_api(mvm->fw))
+ * of use. For the even newer mld api, use the appropriate
+ * function.
+ */
+ if (mvm->mld_api_is_used)
+ iwl_mvm_mld_rm_aux_sta(mvm);
+ else if (iwl_mvm_has_new_station_api(mvm->fw))
iwl_mvm_rm_aux_sta(mvm);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 7ff5ea5e7aca..ca026b5256ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1203,6 +1203,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
bool is_ampdu = false;
int hdrlen;
+ if (WARN_ON_ONCE(!sta))
+ return -1;
+
mvmsta = iwl_mvm_sta_from_mac80211(sta);
fc = hdr->frame_control;
hdrlen = ieee80211_hdrlen(fc);
@@ -1210,9 +1213,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
return -1;
- if (WARN_ON_ONCE(!mvmsta))
- return -1;
-
if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA))
return -1;
@@ -1343,7 +1343,7 @@ drop:
int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_sta *sta)
{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_sta *mvmsta;
struct ieee80211_tx_info info;
struct sk_buff_head mpdus_skbs;
struct ieee80211_vif *vif;
@@ -1352,9 +1352,11 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
struct sk_buff *orig_skb = skb;
const u8 *addr3;
- if (WARN_ON_ONCE(!mvmsta))
+ if (WARN_ON_ONCE(!sta))
return -1;
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA))
return -1;
@@ -1678,7 +1680,7 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
* For 22000-series and lower, this is just 12 bits. For later, 16 bits.
*/
static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
- struct iwl_mvm_tx_resp *tx_resp)
+ struct iwl_tx_resp *tx_resp)
{
u32 val = le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
tx_resp->frame_count);
@@ -1694,8 +1696,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct ieee80211_sta *sta;
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
int txq_id = SEQ_TO_QUEUE(sequence);
- /* struct iwl_mvm_tx_resp_v3 is almost the same */
- struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ /* struct iwl_tx_resp_v3 is almost the same */
+ struct iwl_tx_resp *tx_resp = (void *)pkt->data;
int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
struct agg_tx_status *agg_status =
@@ -1952,7 +1954,7 @@ static const char *iwl_get_agg_tx_status(u16 status)
static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
- struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ struct iwl_tx_resp *tx_resp = (void *)pkt->data;
struct agg_tx_status *frame_status =
iwl_mvm_get_agg_status(mvm, tx_resp);
int i;
@@ -1986,7 +1988,7 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
- struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ struct iwl_tx_resp *tx_resp = (void *)pkt->data;
int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -2027,7 +2029,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ struct iwl_tx_resp *tx_resp = (void *)pkt->data;
if (tx_resp->frame_count == 1)
iwl_mvm_rx_tx_cmd_single(mvm, pkt);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 0e5fa8374103..1d1364d03f02 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -297,6 +297,10 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (vif->type != NL80211_IFTYPE_STATION)
return;
+ /* SMPS is handled by firmware */
+ if (iwl_mvm_has_rlc_offload(mvm))
+ return;
+
mvmvif = iwl_mvm_vif_from_mac80211(vif);
if (WARN_ON_ONCE(!mvmvif->link[link_id]))
@@ -743,58 +747,20 @@ bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
}
unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool tdls, bool cmd_q)
+ struct ieee80211_vif *vif)
{
- struct iwl_fw_dbg_trigger_tlv *trigger;
- struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
- unsigned int default_timeout = cmd_q ?
- IWL_DEF_WD_TIMEOUT :
+ unsigned int default_timeout =
mvm->trans->trans_cfg->base_params->wd_timeout;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
- /*
- * We can't know when the station is asleep or awake, so we
- * must disable the queue hang detection.
- */
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
- vif && vif->type == NL80211_IFTYPE_AP)
- return IWL_WATCHDOG_DISABLED;
- return default_timeout;
- }
-
- trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
- txq_timer = (void *)trigger->data;
-
- if (tdls)
- return le32_to_cpu(txq_timer->tdls);
-
- if (cmd_q)
- return le32_to_cpu(txq_timer->command_queue);
-
- if (WARN_ON(!vif))
- return default_timeout;
-
- switch (ieee80211_vif_type_p2p(vif)) {
- case NL80211_IFTYPE_ADHOC:
- return le32_to_cpu(txq_timer->ibss);
- case NL80211_IFTYPE_STATION:
- return le32_to_cpu(txq_timer->bss);
- case NL80211_IFTYPE_AP:
- return le32_to_cpu(txq_timer->softap);
- case NL80211_IFTYPE_P2P_CLIENT:
- return le32_to_cpu(txq_timer->p2p_client);
- case NL80211_IFTYPE_P2P_GO:
- return le32_to_cpu(txq_timer->p2p_go);
- case NL80211_IFTYPE_P2P_DEVICE:
- return le32_to_cpu(txq_timer->p2p_device);
- case NL80211_IFTYPE_MONITOR:
- return default_timeout;
- default:
- WARN_ON(1);
- return mvm->trans->trans_cfg->base_params->wd_timeout;
- }
+ /*
+ * We can't know when the station is asleep or awake, so we
+ * must disable the queue hang detection.
+ */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
+ vif->type == NL80211_IFTYPE_AP)
+ return IWL_WATCHDOG_DISABLED;
+ return default_timeout;
}
void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index e63efbf809f0..ae93a72542b2 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -89,7 +89,8 @@ iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
}
break;
default:
- IWL_ERR(trans, "WRT: Invalid buffer destination\n");
+ IWL_DEBUG_FW(trans, "WRT: Invalid buffer destination (%d)\n",
+ le32_to_cpu(fw_mon_cfg->buf_location));
}
out:
if (dbg_flags)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 9ad43464b702..805fb249a0c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -500,9 +500,7 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
/* Bz devices */
- {IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_trans_cfg)},
@@ -1577,11 +1575,12 @@ static int iwl_pci_suspend(struct device *device)
return 0;
}
-static int iwl_pci_resume(struct device *device)
+static int _iwl_pci_resume(struct device *device, bool restore)
{
struct pci_dev *pdev = to_pci_dev(device);
struct iwl_trans *trans = pci_get_drvdata(pdev);
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool device_was_powered_off = false;
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
@@ -1597,6 +1596,26 @@ static int iwl_pci_resume(struct device *device)
if (!trans->op_mode)
return 0;
+ /*
+ * If the scratch value was altered, the device was powered off and
+ * we need to reset it completely.
+ * Note: MAC (bits 0:7) will be cleared upon suspend even with wowlan,
+ * so assume that any bits there mean that the device is usable.
+ */
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ &&
+ !iwl_read32(trans, CSR_FUNC_SCRATCH))
+ device_was_powered_off = true;
+
+ if (restore || device_was_powered_off) {
+ trans->state = IWL_TRANS_NO_FW;
+ /* Hope for the best here ... If one of those steps fails, we
+ * won't really know how to recover.
+ */
+ iwl_pcie_prepare_card_hw(trans);
+ iwl_finish_nic_init(trans);
+ iwl_op_mode_device_powered_off(trans->op_mode);
+ }
+
/* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */
if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
return 0;
@@ -1617,9 +1636,23 @@ static int iwl_pci_resume(struct device *device)
return 0;
}
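+/* hibernate's .restore must assume the device lost power, unlike .resume */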
+static int iwl_pci_restore(struct device *device)
+{
+ return _iwl_pci_resume(device, true);
+}
+
+static int iwl_pci_resume(struct device *device)
+{
+ return _iwl_pci_resume(device, false);
+}
+
static const struct dev_pm_ops iwl_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend,
- iwl_pci_resume)
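+ /* spelled out rather than SET_SYSTEM_SLEEP_PM_OPS so that
+ * .restore can differ from .resume
+ */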
+ .suspend = pm_sleep_ptr(iwl_pci_suspend),
+ .resume = pm_sleep_ptr(iwl_pci_resume),
+ .freeze = pm_sleep_ptr(iwl_pci_suspend),
+ .thaw = pm_sleep_ptr(iwl_pci_resume),
+ .poweroff = pm_sleep_ptr(iwl_pci_suspend),
+ .restore = pm_sleep_ptr(iwl_pci_restore),
};
#define IWL_PM_OPS (&iwl_dev_pm_ops)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index b59de4f80b4b..27a7e0b5b3d5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -639,7 +639,8 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue);
-dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr);
+dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
+ unsigned int len);
struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta,
u8 **hdr, unsigned int hdr_room);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 18dda89b7985..8903a5692dfb 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -526,6 +526,8 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
keep_ram_busy = !iwl_pcie_set_ltr(trans);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ IWL_DEBUG_POWER(trans, "function scratch register value is 0x%08x\n",
+ iwl_read32(trans, CSR_FUNC_SCRATCH));
iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE);
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_ROM_START);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 719ddc4b72c5..3b9943eb6934 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1967,7 +1967,6 @@ void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->txqs.cmd.q_id = trans_cfg->cmd_queue;
trans_pcie->txqs.cmd.fifo = trans_cfg->cmd_fifo;
- trans_pcie->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
trans_pcie->txqs.page_offs = trans_cfg->cb_data_offs;
trans_pcie->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
trans_pcie->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
@@ -3567,6 +3566,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
trans->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);
+ /* Set a short watchdog for the command queue */
+ trans_pcie->txqs.cmd.wdg_timeout = IWL_DEF_WD_TIMEOUT;
+
trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
if (!trans_pcie->txqs.tso_hdr_page) {
ret = -ENOMEM;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 2e780fb2da42..b1846abb99b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -168,6 +168,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int data_offset = 0;
dma_addr_t start_hdr_phys;
u16 length, amsdu_pad;
u8 *start_hdr;
@@ -260,7 +261,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
int ret;
tb_len = min_t(unsigned int, tso.size, data_left);
- tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data);
+ tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset,
+ tb_len);
/* Not a real mapping error, use direct comparison */
if (unlikely(tb_phys == DMA_MAPPING_ERROR))
goto out_err;
@@ -272,6 +274,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
goto out_err;
data_left -= tb_len;
+ data_offset += tb_len;
tso_build_data(skb, &tso, tb_len);
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 22d482ae53d9..9fe050f0ddc1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1814,23 +1814,31 @@ out:
/**
* iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list
* @sgt: scatter gather table
- * @addr: Virtual address
+ * @offset: Offset into the mapped memory (i.e. SKB payload data)
+ * @len: Length of the area
*
- * Find the entry that includes the address for the given address and return
- * correct physical address for the TB entry.
+ * Find the DMA address that corresponds to the SKB payload data at the
+ * position given by @offset.
*
* Returns: Address for TB entry
*/
-dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr)
+dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
+ unsigned int len)
{
struct scatterlist *sg;
+ unsigned int sg_offset = 0;
int i;
+ /*
+ * Search the mapped DMA areas in the SG for the area that contains the
+ * data at offset with the given length.
+ */
for_each_sgtable_dma_sg(sgt, sg, i) {
- if (addr >= sg_virt(sg) &&
- (u8 *)addr < (u8 *)sg_virt(sg) + sg_dma_len(sg))
- return sg_dma_address(sg) +
- ((unsigned long)addr - (unsigned long)sg_virt(sg));
+ if (offset >= sg_offset &&
+ offset + len <= sg_offset + sg_dma_len(sg))
+ return sg_dma_address(sg) + offset - sg_offset;
+
+ sg_offset += sg_dma_len(sg);
}
WARN_ON_ONCE(1);
@@ -1875,7 +1883,9 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1);
- sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
+ /* Only map the data, not the header (it is copied to the TSO page) */
+ sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb),
+ skb->data_len);
if (WARN_ON_ONCE(sgt->orig_nents <= 0))
return NULL;
@@ -1900,6 +1910,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int data_offset = 0;
u16 length, iv_len, amsdu_pad;
dma_addr_t start_hdr_phys;
u8 *start_hdr, *pos_hdr;
@@ -2000,7 +2011,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
data_left);
dma_addr_t tb_phys;
- tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data);
+ tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset, size);
/* Not a real mapping error, use direct comparison */
if (unlikely(tb_phys == DMA_MAPPING_ERROR))
return -EINVAL;
@@ -2011,6 +2022,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
tb_phys, size);
data_left -= size;
+ data_offset += size;
tso_build_data(skb, &tso, size);
}
}
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index b700c213d10c..afe9bcd3ad46 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -15,7 +15,7 @@
#include <linux/slab.h>
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "decl.h"
#include "cfg.h"
diff --git a/drivers/net/wireless/marvell/libertas/cmd.h b/drivers/net/wireless/marvell/libertas/cmd.h
index 3c193074662b..d7be232f5739 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.h
+++ b/drivers/net/wireless/marvell/libertas/cmd.h
@@ -116,11 +116,6 @@ int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
int8_t p2, int usesnr);
-int lbs_set_data_rate(struct lbs_private *priv, u8 rate);
-
-int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
- uint16_t cmd_action);
-
int lbs_set_tx_power(struct lbs_private *priv, s16 dbm);
int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep);
diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c
index 74cb7551f427..f2aa659e7714 100644
--- a/drivers/net/wireless/marvell/libertas/cmdresp.c
+++ b/drivers/net/wireless/marvell/libertas/cmdresp.c
@@ -8,7 +8,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/sched.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/cfg80211.h>
#include "cfg.h"
diff --git a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
index 631b5da09f86..a5d4c09fb918 100644
--- a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
@@ -484,12 +484,9 @@ void lbtf_complete_command(struct lbtf_private *priv, struct cmd_ctrl_node *cmd,
void lbtf_cmd_response_rx(struct lbtf_private *priv);
/* main.c */
-struct chan_freq_power *lbtf_get_region_cfp_table(u8 region,
- int *cfp_no);
struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev,
const struct lbtf_ops *ops);
int lbtf_remove_card(struct lbtf_private *priv);
-int lbtf_start_card(struct lbtf_private *priv);
int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb);
void lbtf_send_tx_feedback(struct lbtf_private *priv, u8 retrycnt, u8 fail);
void lbtf_bcn_sent(struct lbtf_private *priv);
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index b90f922f1cdc..032b93a41d99 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -117,12 +117,12 @@ void mwifiex_dfs_cac_work_queue(struct work_struct *work)
dfs_cac_work);
chandef = priv->dfs_chandef;
- if (priv->wdev.cac_started) {
+ if (priv->wdev.links[0].cac_started) {
mwifiex_dbg(priv->adapter, MSG,
"CAC timer finished; No radar detected\n");
cfg80211_cac_event(priv->netdev, &chandef,
NL80211_RADAR_CAC_FINISHED,
- GFP_KERNEL);
+ GFP_KERNEL, 0);
}
}
@@ -174,7 +174,7 @@ int mwifiex_stop_radar_detection(struct mwifiex_private *priv,
*/
void mwifiex_abort_cac(struct mwifiex_private *priv)
{
- if (priv->wdev.cac_started) {
+ if (priv->wdev.links[0].cac_started) {
if (mwifiex_stop_radar_detection(priv, &priv->dfs_chandef))
mwifiex_dbg(priv->adapter, ERROR,
"failed to stop CAC in FW\n");
@@ -182,7 +182,8 @@ void mwifiex_abort_cac(struct mwifiex_private *priv)
"Aborting delayed work for CAC.\n");
cancel_delayed_work_sync(&priv->dfs_cac_work);
cfg80211_cac_event(priv->netdev, &priv->dfs_chandef,
- NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
+ NL80211_RADAR_CAC_ABORTED, GFP_KERNEL,
+ 0);
}
}
@@ -221,7 +222,7 @@ int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv,
cfg80211_cac_event(priv->netdev,
&priv->dfs_chandef,
NL80211_RADAR_DETECTED,
- GFP_KERNEL);
+ GFP_KERNEL, 0);
}
break;
default:
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index c0c635e74bc5..66f0f5377ac1 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -881,8 +881,6 @@ void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter)
struct mwifiex_private *priv;
for (i = 0; i < adapter->priv_num; i++) {
- if (!adapter->priv[i])
- continue;
priv = adapter->priv[i];
tx_win_size = priv->add_ba_param.tx_win_size;
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.h b/drivers/net/wireless/marvell/mwifiex/11n.h
index 7738ebe1fec1..773bd5c0f007 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n.h
@@ -108,9 +108,7 @@ static inline u8 mwifiex_space_avail_for_new_ba_stream(
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv)
- ba_stream_num += list_count_nodes(
- &priv->tx_ba_stream_tbl_ptr);
+ ba_stream_num += list_count_nodes(&priv->tx_ba_stream_tbl_ptr);
}
if (adapter->fw_api_ver == MWIFIEX_FW_V15) {
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 10690e82358b..cb948ca34373 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -810,8 +810,6 @@ void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (!priv)
- continue;
spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
@@ -834,8 +832,6 @@ static void mwifiex_update_ampdu_rxwinsize(struct mwifiex_adapter *adapter,
dev_dbg(adapter->dev, "Update rxwinsize %d\n", coex_flag);
for (i = 0; i < adapter->priv_num; i++) {
- if (!adapter->priv[i])
- continue;
priv = adapter->priv[i];
rx_win_size = priv->add_ba_param.rx_win_size;
if (coex_flag) {
@@ -882,17 +878,16 @@ void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter)
u8 count = 0;
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- priv = adapter->priv[i];
- if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
- if (priv->media_connected)
- count++;
- }
- if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
- if (priv->bss_started)
- count++;
- }
+ priv = adapter->priv[i];
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
+ if (priv->media_connected)
+ count++;
}
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+ if (priv->bss_started)
+ count++;
+ }
+
if (count >= MWIFIEX_BSS_COEX_COUNT)
break;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 155eb0fab12a..fca3eea7ee84 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -221,6 +221,26 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
return 0;
}
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+ if (ieee80211_is_auth(mgmt->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "auth: send auth to %pM\n", mgmt->da);
+ if (ieee80211_is_deauth(mgmt->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "auth: send deauth to %pM\n", mgmt->da);
+ if (ieee80211_is_disassoc(mgmt->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "assoc: send disassoc to %pM\n", mgmt->da);
+ if (ieee80211_is_assoc_resp(mgmt->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "assoc: send assoc resp to %pM\n",
+ mgmt->da);
+ if (ieee80211_is_reassoc_resp(mgmt->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "assoc: send reassoc resp to %pM\n",
+ mgmt->da);
+ }
+
pkt_len = len + ETH_ALEN;
skb = dev_alloc_skb(MWIFIEX_MIN_DATA_HEADER_LEN +
MWIFIEX_MGMT_FRAME_HEADER_SIZE +
@@ -268,6 +288,8 @@ mwifiex_cfg80211_update_mgmt_frame_registrations(struct wiphy *wiphy,
if (mask != priv->mgmt_frame_mask) {
priv->mgmt_frame_mask = mask;
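+ /* keep the host-MLME frame subscriptions when userspace rewrites the mask */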
+ if (priv->host_mlme_reg)
+ priv->mgmt_frame_mask |= HOST_MLME_MGMT_MASK;
mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
HostCmd_ACT_GEN_SET, 0,
&priv->mgmt_frame_mask, false);
@@ -503,6 +525,9 @@ mwifiex_cfg80211_set_default_mgmt_key(struct wiphy *wiphy,
wiphy_dbg(wiphy, "set default mgmt key, key index=%d\n", key_index);
+ if (priv->adapter->host_mlme_enabled)
+ return 0;
+
memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
encrypt_key.key_len = WLAN_KEY_LEN_CCMP;
encrypt_key.key_index = key_index;
@@ -848,6 +873,7 @@ static int mwifiex_deinit_priv_params(struct mwifiex_private *priv)
struct mwifiex_adapter *adapter = priv->adapter;
unsigned long flags;
+ priv->host_mlme_reg = false;
priv->mgmt_frame_mask = 0;
if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
HostCmd_ACT_GEN_SET, 0,
@@ -1880,7 +1906,7 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_sta_node *sta_node;
u8 deauth_mac[ETH_ALEN];
- if (!priv->bss_started && priv->wdev.cac_started) {
+ if (!priv->bss_started && priv->wdev.links[0].cac_started) {
mwifiex_dbg(priv->adapter, INFO, "%s: abort CAC!\n", __func__);
mwifiex_abort_cac(priv);
}
@@ -3482,7 +3508,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv && priv->netdev)
+ if (priv->netdev)
netif_device_detach(priv->netdev);
}
@@ -3554,7 +3580,7 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv && priv->netdev)
+ if (priv->netdev)
netif_device_attach(priv->netdev);
}
@@ -3633,6 +3659,9 @@ static int mwifiex_set_rekey_data(struct wiphy *wiphy, struct net_device *dev,
if (!ISSUPP_FIRMWARE_SUPPLICANT(priv->adapter->fw_cap_info))
return -EOPNOTSUPP;
+ if (priv->adapter->host_mlme_enabled)
+ return 0;
+
return mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG,
HostCmd_ACT_GEN_SET, 0, data, true);
}
@@ -3948,11 +3977,42 @@ mwifiex_cfg80211_tdls_cancel_chan_switch(struct wiphy *wiphy,
}
static int
+mwifiex_cfg80211_uap_add_station(struct mwifiex_private *priv, const u8 *mac,
+ struct station_parameters *params)
+{
+ struct mwifiex_sta_info add_sta;
+ int ret;
+
+ memcpy(add_sta.peer_mac, mac, ETH_ALEN);
+ add_sta.params = params;
+
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_ADD_NEW_STATION,
+ HostCmd_ACT_ADD_STA, 0, (void *)&add_sta, true);
+
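+ /* the FW accepted the station: let cfg80211 know about it */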
+ if (!ret) {
+ struct station_info *sinfo;
+
+ sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
+ if (!sinfo)
+ return -ENOMEM;
+
+ cfg80211_new_sta(priv->netdev, mac, sinfo, GFP_KERNEL);
+ kfree(sinfo);
+ }
+
+ return ret;
+}
+
+static int
mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_parameters *params)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ if (priv->adapter->host_mlme_enabled &&
+ (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP))
+ return mwifiex_cfg80211_uap_add_station(priv, mac, params);
+
if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
return -EOPNOTSUPP;
@@ -3978,7 +4038,7 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
return -EBUSY;
}
- if (priv->wdev.cac_started)
+ if (priv->wdev.links[0].cac_started)
return -EBUSY;
if (cfg80211_chandef_identical(&params->chandef,
@@ -4145,7 +4205,7 @@ static int
mwifiex_cfg80211_start_radar_detection(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms)
+ u32 cac_time_ms, int link_id)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct mwifiex_radar_params radar_params;
@@ -4190,6 +4250,10 @@ mwifiex_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev,
int ret;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ if (priv->adapter->host_mlme_enabled &&
+ (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP))
+ return 0;
+
/* we support change_station handler only for TDLS peers*/
if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
return -EOPNOTSUPP;
@@ -4206,8 +4270,307 @@ mwifiex_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
+static int
+mwifiex_cfg80211_authenticate(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_auth_request *req)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct sk_buff *skb;
+ u16 pkt_len, auth_alg;
+ int ret;
+ struct mwifiex_ieee80211_mgmt *mgmt;
+ struct mwifiex_txinfo *tx_info;
+ u32 tx_control = 0, pkt_type = PKT_TYPE_MGMT;
+ u8 trans = 1, status_code = 0;
+ u8 *varptr = NULL;
+
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+ mwifiex_dbg(priv->adapter, ERROR, "Interface role is AP\n");
+ return -EFAULT;
+ }
+
+ if (priv->wdev.iftype != NL80211_IFTYPE_STATION) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Interface type is not correct (type %d)\n",
+ priv->wdev.iftype);
+ return -EINVAL;
+ }
+
+ if (priv->auth_alg != WLAN_AUTH_SAE &&
+ (priv->auth_flag & HOST_MLME_AUTH_PENDING)) {
+ mwifiex_dbg(priv->adapter, ERROR, "Pending auth on going\n");
+ return -EBUSY;
+ }
+
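+ /* first host-MLME auth: subscribe to the management frames we need */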
+ if (!priv->host_mlme_reg) {
+ priv->host_mlme_reg = true;
+ priv->mgmt_frame_mask |= HOST_MLME_MGMT_MASK;
+ mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->mgmt_frame_mask, false);
+ }
+
+ switch (req->auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ auth_alg = WLAN_AUTH_OPEN;
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ auth_alg = WLAN_AUTH_SHARED_KEY;
+ break;
+ case NL80211_AUTHTYPE_FT:
+ auth_alg = WLAN_AUTH_FT;
+ break;
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ auth_alg = WLAN_AUTH_LEAP;
+ break;
+ case NL80211_AUTHTYPE_SAE:
+ auth_alg = WLAN_AUTH_SAE;
+ break;
+ default:
+ mwifiex_dbg(priv->adapter, ERROR,
+ "unsupported auth type=%d\n", req->auth_type);
+ return -EOPNOTSUPP;
+ }
+
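+ /* stay on the AP's channel long enough for the auth exchange */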
+ if (!priv->auth_flag) {
+ ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_SET,
+ req->bss->channel,
+ AUTH_TX_DEFAULT_WAIT_TIME);
+
+ if (!ret) {
+ priv->roc_cfg.cookie = get_random_u32() | 1;
+ priv->roc_cfg.chan = *req->bss->channel;
+ } else {
+ return -EFAULT;
+ }
+ }
+
+ priv->sec_info.authentication_mode = auth_alg;
+
+ mwifiex_cancel_scan(adapter);
+
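+ /* MWIFIEX_AUTH_BODY_LEN already covers the fixed transaction and
+ * status fields, so don't count the first 4 bytes of auth_data twice
+ */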
+ pkt_len = (u16)req->ie_len + req->auth_data_len +
+ MWIFIEX_MGMT_HEADER_LEN + MWIFIEX_AUTH_BODY_LEN;
+ if (req->auth_data_len >= 4)
+ pkt_len -= 4;
+
+ skb = dev_alloc_skb(MWIFIEX_MIN_DATA_HEADER_LEN +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+ pkt_len + sizeof(pkt_len));
+ if (!skb) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "allocate skb failed for management frame\n");
+ return -ENOMEM;
+ }
+
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
+ tx_info->pkt_len = pkt_len;
+
+ skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
+ memcpy(skb_push(skb, sizeof(pkt_len)), &pkt_len, sizeof(pkt_len));
+ memcpy(skb_push(skb, sizeof(tx_control)),
+ &tx_control, sizeof(tx_control));
+ memcpy(skb_push(skb, sizeof(pkt_type)), &pkt_type, sizeof(pkt_type));
+
+ mgmt = (struct mwifiex_ieee80211_mgmt *)skb_put(skb, pkt_len);
+ memset(mgmt, 0, pkt_len);
+ mgmt->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
+ memcpy(mgmt->da, req->bss->bssid, ETH_ALEN);
+ memcpy(mgmt->sa, priv->curr_addr, ETH_ALEN);
+ memcpy(mgmt->bssid, req->bss->bssid, ETH_ALEN);
+ eth_broadcast_addr(mgmt->addr4);
+
+ if (req->auth_data_len >= 4) {
+ if (req->auth_type == NL80211_AUTHTYPE_SAE) {
+ __le16 *pos = (__le16 *)req->auth_data;
+
+ trans = le16_to_cpu(pos[0]);
+ status_code = le16_to_cpu(pos[1]);
+ }
+ memcpy((u8 *)(&mgmt->auth.variable), req->auth_data + 4,
+ req->auth_data_len - 4);
+ varptr = (u8 *)&mgmt->auth.variable +
+ (req->auth_data_len - 4);
+ }
+
+ mgmt->auth.auth_alg = cpu_to_le16(auth_alg);
+ mgmt->auth.auth_transaction = cpu_to_le16(trans);
+ mgmt->auth.status_code = cpu_to_le16(status_code);
+
+ if (req->ie && req->ie_len) {
+ if (!varptr)
+ varptr = (u8 *)&mgmt->auth.variable;
+ memcpy((u8 *)varptr, req->ie, req->ie_len);
+ }
+
+ priv->auth_flag = HOST_MLME_AUTH_PENDING;
+ priv->auth_alg = auth_alg;
+
+ skb->priority = WMM_HIGHEST_PRIORITY;
+ __net_timestamp(skb);
+
+ mwifiex_dbg(priv->adapter, MSG,
+ "auth: send authentication to %pM\n", req->bss->bssid);
+
+ mwifiex_queue_tx_pkt(priv, skb);
+
+ return 0;
+}
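For clarity, the length bookkeeping above can be checked in isolation. The following standalone sketch mirrors the pkt_len computation; the header and body sizes match the MWIFIEX_MGMT_HEADER_LEN and MWIFIEX_AUTH_BODY_LEN definitions this patch adds to decl.h, and the SAE sample lengths are hypothetical:

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6
/* Mirrors the decl.h additions below: FW 4-address mgmt header. */
#define MWIFIEX_MGMT_HEADER_LEN \
	(2 + 2 + ETH_ALEN + ETH_ALEN + ETH_ALEN + 2 + ETH_ALEN)	/* 30 */
#define MWIFIEX_AUTH_BODY_LEN	(2 + 2 + 2)	/* alg + trans + status = 6 */

int main(void)
{
	/* Hypothetical SAE commit: auth_data starts with 4 bytes of
	 * transaction number + status code, which are re-encoded in the
	 * fixed auth body, so they are subtracted from the copy length. */
	uint16_t ie_len = 0, auth_data_len = 100;
	uint16_t pkt_len = ie_len + auth_data_len +
			   MWIFIEX_MGMT_HEADER_LEN + MWIFIEX_AUTH_BODY_LEN;

	if (auth_data_len >= 4)
		pkt_len -= 4;

	printf("mgmt payload length: %u\n", (unsigned)pkt_len);	/* 30 + 6 + 96 = 132 */
	return 0;
}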
+
+static int
+mwifiex_cfg80211_associate(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_assoc_request *req)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ struct mwifiex_adapter *adapter = priv->adapter;
+ int ret;
+ struct cfg80211_ssid req_ssid;
+ const u8 *ssid_ie;
+
+ if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) {
+ mwifiex_dbg(adapter, ERROR,
+ "%s: reject infra assoc request in non-STA role\n",
+ dev->name);
+ return -EINVAL;
+ }
+
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags) ||
+ test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
+ mwifiex_dbg(adapter, ERROR,
+ "%s: Ignore association.\t"
+ "Card removed or FW in bad state\n",
+ dev->name);
+ return -EFAULT;
+ }
+
+ if (priv->auth_alg == WLAN_AUTH_SAE)
+ priv->auth_flag = HOST_MLME_AUTH_DONE;
+
+ if (priv->auth_flag && !(priv->auth_flag & HOST_MLME_AUTH_DONE))
+ return -EBUSY;
+
+ if (priv->roc_cfg.cookie) {
+ ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_REMOVE,
+ &priv->roc_cfg.chan, 0);
+ if (!ret)
+ memset(&priv->roc_cfg, 0,
+ sizeof(struct mwifiex_roc_cfg));
+ else
+ return -EFAULT;
+ }
+
+ if (!mwifiex_stop_bg_scan(priv))
+ cfg80211_sched_scan_stopped_locked(priv->wdev.wiphy, 0);
+
+ memset(&req_ssid, 0, sizeof(struct cfg80211_ssid));
+ rcu_read_lock();
+ ssid_ie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
+
+ if (!ssid_ie)
+ goto ssid_err;
+
+ req_ssid.ssid_len = ssid_ie[1];
+ if (req_ssid.ssid_len > IEEE80211_MAX_SSID_LEN) {
+ mwifiex_dbg(adapter, ERROR, "invalid SSID - aborting\n");
+ goto ssid_err;
+ }
+
+ memcpy(req_ssid.ssid, ssid_ie + 2, req_ssid.ssid_len);
+ if (!req_ssid.ssid_len || req_ssid.ssid[0] < 0x20) {
+ mwifiex_dbg(adapter, ERROR, "invalid SSID - aborting\n");
+ goto ssid_err;
+ }
+ rcu_read_unlock();
+
+ /* As this is a new association, clear locally stored
+ * keys and security-related flags
+ */
+ priv->sec_info.wpa_enabled = false;
+ priv->sec_info.wpa2_enabled = false;
+ priv->wep_key_curr_index = 0;
+ priv->sec_info.encryption_mode = 0;
+ priv->sec_info.is_authtype_auto = 0;
+ if (mwifiex_set_encode(priv, NULL, NULL, 0, 0, NULL, 1)) {
+ mwifiex_dbg(priv->adapter, ERROR, "deleting the crypto keys\n");
+ return -EFAULT;
+ }
+
+ if (req->crypto.n_ciphers_pairwise)
+ priv->sec_info.encryption_mode =
+ req->crypto.ciphers_pairwise[0];
+
+ if (req->crypto.cipher_group)
+ priv->sec_info.encryption_mode = req->crypto.cipher_group;
+
+ if (req->ie)
+ mwifiex_set_gen_ie(priv, req->ie, req->ie_len);
+
+ memcpy(priv->cfg_bssid, req->bss->bssid, ETH_ALEN);
+
+ mwifiex_dbg(adapter, MSG,
+ "assoc: send association to %pM\n", req->bss->bssid);
+
+ cfg80211_ref_bss(adapter->wiphy, req->bss);
+ ret = mwifiex_bss_start(priv, req->bss, &req_ssid);
+ if (ret) {
+ priv->auth_flag = 0;
+ priv->auth_alg = WLAN_AUTH_NONE;
+ eth_zero_addr(priv->cfg_bssid);
+ }
+
+ if (priv->assoc_rsp_size) {
+ priv->req_bss = req->bss;
+ adapter->assoc_resp_received = true;
+ queue_work(adapter->host_mlme_workqueue,
+ &adapter->host_mlme_work);
+ }
+
+ cfg80211_put_bss(priv->adapter->wiphy, req->bss);
+
+ return 0;
+
+ssid_err:
+ rcu_read_unlock();
+ return -EFAULT;
+}
+
+static int
+mwifiex_cfg80211_deauthenticate(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_deauth_request *req)
+{
+ return mwifiex_cfg80211_disconnect(wiphy, dev, req->reason_code);
+}
+
+static int
+mwifiex_cfg80211_disassociate(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_disassoc_request *req)
+{
+ return mwifiex_cfg80211_disconnect(wiphy, dev, req->reason_code);
+}
+
+static int
+mwifiex_cfg80211_probe_client(struct wiphy *wiphy,
+ struct net_device *dev, const u8 *peer,
+ u64 *cookie)
+{
+ /* hostapd looks for NL80211_CMD_PROBE_CLIENT support; otherwise,
+ * it requires monitor-mode support (which mwifiex doesn't support).
+ * Provide fake probe_client support to work around this.
+ */
+ return -EOPNOTSUPP;
+}
+
/* station cfg80211 operations */
-static struct cfg80211_ops mwifiex_cfg80211_ops = {
+static const struct cfg80211_ops mwifiex_cfg80211_ops = {
.add_virtual_intf = mwifiex_add_virtual_intf,
.del_virtual_intf = mwifiex_del_virtual_intf,
.change_virtual_intf = mwifiex_cfg80211_change_virtual_intf,
@@ -4342,32 +4705,82 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
struct mwifiex_private *priv = adapter->priv[MWIFIEX_BSS_TYPE_STA];
u8 *country_code;
u32 thr, retry;
+ struct cfg80211_ops *ops;
+
+ ops = devm_kmemdup(adapter->dev, &mwifiex_cfg80211_ops,
+ sizeof(mwifiex_cfg80211_ops), GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
/* create a new wiphy for use with cfg80211 */
- wiphy = wiphy_new(&mwifiex_cfg80211_ops,
- sizeof(struct mwifiex_adapter *));
+ wiphy = wiphy_new(ops, sizeof(struct mwifiex_adapter *));
if (!wiphy) {
mwifiex_dbg(adapter, ERROR,
"%s: creating new wiphy\n", __func__);
return -ENOMEM;
}
+ if (adapter->host_mlme_enabled) {
+ ops->auth = mwifiex_cfg80211_authenticate;
+ ops->assoc = mwifiex_cfg80211_associate;
+ ops->deauth = mwifiex_cfg80211_deauthenticate;
+ ops->disassoc = mwifiex_cfg80211_disassociate;
+ ops->disconnect = NULL;
+ ops->connect = NULL;
+ ops->probe_client = mwifiex_cfg80211_probe_client;
+ }
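Since the ops table is now const, enabling the host MLME hooks requires a per-adapter writable copy; devm_kmemdup() ties that copy's lifetime to the device. A userspace analog of the duplicate-then-patch pattern, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Keep one const template of function pointers, duplicate it per
 * instance, then patch only the copy; the template stays shared. */
struct demo_ops {
	void (*connect)(void);
	void (*auth)(void);
};

static void demo_connect(void) { puts("fullmac connect"); }
static void demo_auth(void)    { puts("host-MLME auth"); }

static const struct demo_ops demo_template = { .connect = demo_connect };

int main(void)
{
	struct demo_ops *ops = malloc(sizeof(*ops));

	if (!ops)
		return 1;
	memcpy(ops, &demo_template, sizeof(*ops));

	/* host MLME enabled: override in the copy, template untouched */
	ops->auth = demo_auth;
	ops->connect = NULL;

	ops->auth();
	free(ops);
	return 0;
}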
wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH;
wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
- wiphy->mgmt_stypes = mwifiex_mgmt_stypes;
+ if (adapter->host_mlme_enabled) {
+ memcpy(adapter->mwifiex_mgmt_stypes,
+ mwifiex_mgmt_stypes,
+ NUM_NL80211_IFTYPES *
+ sizeof(struct ieee80211_txrx_stypes));
+
+ adapter->mwifiex_mgmt_stypes[NL80211_IFTYPE_AP].tx = 0xffff;
+ adapter->mwifiex_mgmt_stypes[NL80211_IFTYPE_AP].rx =
+ BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4);
+ wiphy->mgmt_stypes = adapter->mwifiex_mgmt_stypes;
+ } else {
+ wiphy->mgmt_stypes = mwifiex_mgmt_stypes;
+ }
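The rx bitmap above is indexed by management frame subtype: the IEEE80211_STYPE_* values occupy bits 4-7 of the frame control field, so `stype >> 4` yields a bit position from 0 to 15. A quick illustration, with subtype values as defined in linux/ieee80211.h:

#include <stdio.h>

#define BIT(n) (1u << (n))
/* Subtype values as defined in linux/ieee80211.h */
#define IEEE80211_STYPE_AUTH	0x00B0
#define IEEE80211_STYPE_DEAUTH	0x00C0

int main(void)
{
	/* The subtype lives in bits 4-7 of frame_control, so >> 4
	 * gives a 0..15 index into the 16-bit rx/tx bitmaps. */
	printf("AUTH   -> bit %d, mask 0x%04x\n",
	       IEEE80211_STYPE_AUTH >> 4, BIT(IEEE80211_STYPE_AUTH >> 4));
	printf("DEAUTH -> bit %d, mask 0x%04x\n",
	       IEEE80211_STYPE_DEAUTH >> 4, BIT(IEEE80211_STYPE_DEAUTH >> 4));
	return 0;
}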
wiphy->max_remain_on_channel_duration = 5000;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_AP);
+ wiphy->max_num_akm_suites = CFG80211_MAX_NUM_AKM_SUITES;
+
if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
- wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz;
- if (adapter->config_bands & BAND_A)
- wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz;
- else
+ wiphy->bands[NL80211_BAND_2GHZ] = devm_kmemdup(adapter->dev,
+ &mwifiex_band_2ghz,
+ sizeof(mwifiex_band_2ghz),
+ GFP_KERNEL);
+ if (!wiphy->bands[NL80211_BAND_2GHZ]) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (adapter->config_bands & BAND_A) {
+ wiphy->bands[NL80211_BAND_5GHZ] = devm_kmemdup(adapter->dev,
+ &mwifiex_band_5ghz,
+ sizeof(mwifiex_band_5ghz),
+ GFP_KERNEL);
+ if (!wiphy->bands[NL80211_BAND_5GHZ]) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ } else {
wiphy->bands[NL80211_BAND_5GHZ] = NULL;
+ }
if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
@@ -4395,14 +4808,18 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
ether_addr_copy(wiphy->perm_addr, adapter->perm_addr);
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
- wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
- WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
+ wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_HAS_CHANNEL_SWITCH |
WIPHY_FLAG_NETNS_OK |
WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ if (adapter->host_mlme_enabled)
+ wiphy->flags |= WIPHY_FLAG_REPORTS_OBSS;
+ else
+ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME;
+
if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
@@ -4432,6 +4849,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
NL80211_FEATURE_LOW_PRIORITY_SCAN |
NL80211_FEATURE_NEED_OBSS_SCAN;
+ if (adapter->host_mlme_enabled)
+ wiphy->features |= NL80211_FEATURE_SAE;
+
if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
wiphy->features |= NL80211_FEATURE_HT_IBSS;
@@ -4461,8 +4881,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
if (ret < 0) {
mwifiex_dbg(adapter, ERROR,
"%s: wiphy_register failed: %d\n", __func__, ret);
- wiphy_free(wiphy);
- return ret;
+ goto err;
}
if (!adapter->regd) {
@@ -4504,4 +4923,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
adapter->wiphy = wiphy;
return ret;
+
+err:
+ wiphy_free(wiphy);
+
+ return ret;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 9eff29a25544..1cff001bdc51 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -5,7 +5,7 @@
* Copyright 2011-2020 NXP
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "decl.h"
#include "ioctl.h"
#include "util.h"
@@ -482,7 +482,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
if ((adapter->event_cause & EVENT_ID_MASK) == EVENT_RADAR_DETECTED) {
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv && mwifiex_is_11h_active(priv)) {
+ if (mwifiex_is_11h_active(priv)) {
adapter->event_cause |=
((priv->bss_num & 0xff) << 16) |
((priv->bss_type & 0xff) << 24);
@@ -635,6 +635,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
case HostCmd_CMD_UAP_STA_DEAUTH:
case HOST_CMD_APCMD_SYS_RESET:
case HOST_CMD_APCMD_STA_LIST:
+ case HostCmd_CMD_CHAN_REPORT_REQUEST:
+ case HostCmd_CMD_ADD_NEW_STATION:
ret = mwifiex_uap_prepare_cmd(priv, cmd_no, cmd_action,
cmd_oid, data_buf,
cmd_ptr);
@@ -924,6 +926,24 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
return ret;
}
+void mwifiex_process_assoc_resp(struct mwifiex_adapter *adapter)
+{
+ struct cfg80211_rx_assoc_resp_data assoc_resp = {
+ .uapsd_queues = -1,
+ };
+ struct mwifiex_private *priv =
+ mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+
+ if (priv->assoc_rsp_size) {
+ assoc_resp.links[0].bss = priv->req_bss;
+ assoc_resp.buf = priv->assoc_rsp_buf;
+ assoc_resp.len = priv->assoc_rsp_size;
+ cfg80211_rx_assoc_resp(priv->netdev,
+ &assoc_resp);
+ priv->assoc_rsp_size = 0;
+ }
+}
+
/*
* This function handles the timeout of command sending.
*
@@ -1672,6 +1692,13 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
if (adapter->fw_api_ver == MWIFIEX_FW_V15)
adapter->scan_chan_gap_enabled = true;
+ if (adapter->key_api_major_ver != KEY_API_VER_MAJOR_V2)
+ adapter->host_mlme_enabled = false;
+
+ mwifiex_dbg(adapter, MSG, "host_mlme: %s, key_api: %d\n",
+ adapter->host_mlme_enabled ? "enable" : "disable",
+ adapter->key_api_major_ver);
+
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h
index 326ffb05d791..84603f1e7f6e 100644
--- a/drivers/net/wireless/marvell/mwifiex/decl.h
+++ b/drivers/net/wireless/marvell/mwifiex/decl.h
@@ -31,6 +31,29 @@
* + sizeof(tx_control)
*/
+#define FRMCTL_LEN 2
+#define DURATION_LEN 2
+#define SEQCTL_LEN 2
+/* special FW 4-address management header */
+#define MWIFIEX_MGMT_HEADER_LEN (FRMCTL_LEN + DURATION_LEN + ETH_ALEN + \
+ ETH_ALEN + ETH_ALEN + SEQCTL_LEN + ETH_ALEN)
+
+#define AUTH_ALG_LEN 2
+#define AUTH_TRANSACTION_LEN 2
+#define AUTH_STATUS_LEN 2
+#define MWIFIEX_AUTH_BODY_LEN (AUTH_ALG_LEN + AUTH_TRANSACTION_LEN + \
+ AUTH_STATUS_LEN)
+
+#define HOST_MLME_AUTH_PENDING BIT(0)
+#define HOST_MLME_AUTH_DONE BIT(1)
+
+#define HOST_MLME_MGMT_MASK (BIT(IEEE80211_STYPE_AUTH >> 4) | \
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) | \
+ BIT(IEEE80211_STYPE_DISASSOC >> 4))
+#define AUTH_TX_DEFAULT_WAIT_TIME 2400
+
+#define WLAN_AUTH_NONE 0xFFFF
+
#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2
#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16
#define MWIFIEX_MAX_TDLS_PEER_SUPPORTED 8
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 3adc447b715f..d03129d5d24e 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -210,6 +210,9 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_RANDOM_MAC (PROPRIETARY_TLV_BASE_ID + 236)
#define TLV_TYPE_CHAN_ATTR_CFG (PROPRIETARY_TLV_BASE_ID + 237)
#define TLV_TYPE_MAX_CONN (PROPRIETARY_TLV_BASE_ID + 279)
+#define TLV_TYPE_HOST_MLME (PROPRIETARY_TLV_BASE_ID + 307)
+#define TLV_TYPE_UAP_STA_FLAGS (PROPRIETARY_TLV_BASE_ID + 313)
+#define TLV_TYPE_SAE_PWE_MODE (PROPRIETARY_TLV_BASE_ID + 339)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -405,6 +408,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_STA_CONFIGURE 0x023f
#define HostCmd_CMD_CHAN_REGION_CFG 0x0242
#define HostCmd_CMD_PACKET_AGGR_CTRL 0x0251
+#define HostCmd_CMD_ADD_NEW_STATION 0x025f
#define PROTOCOL_NO_SECURITY 0x01
#define PROTOCOL_STATIC_WEP 0x02
@@ -415,6 +419,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define KEY_MGMT_NONE 0x04
#define KEY_MGMT_PSK 0x02
#define KEY_MGMT_EAP 0x01
+#define KEY_MGMT_PSK_SHA256 0x100
+#define KEY_MGMT_SAE 0x400
#define CIPHER_TKIP 0x04
#define CIPHER_AES_CCMP 0x08
#define VALID_CIPHER_BITMAP 0x0c
@@ -500,6 +506,9 @@ enum mwifiex_channel_flags {
#define HostCmd_ACT_GET_TX 0x0008
#define HostCmd_ACT_GET_BOTH 0x000c
+#define HostCmd_ACT_REMOVE_STA 0x0
+#define HostCmd_ACT_ADD_STA 0x1
+
#define RF_ANTENNA_AUTO 0xFFFF
#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) \
@@ -744,6 +753,25 @@ struct uap_rxpd {
u8 flags;
} __packed;
+struct mwifiex_auth {
+ __le16 auth_alg;
+ __le16 auth_transaction;
+ __le16 status_code;
+ /* possibly followed by Challenge text */
+ u8 variable[];
+} __packed;
+
+struct mwifiex_ieee80211_mgmt {
+ __le16 frame_control;
+ __le16 duration;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ struct mwifiex_auth auth;
+} __packed;
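A hedged consistency note, not part of the patch: the fixed part of this struct is exactly what the MWIFIEX_MGMT_HEADER_LEN and MWIFIEX_AUTH_BODY_LEN arithmetic in decl.h accounts for, which could be asserted like so if both headers were visible in one translation unit:

/* Sketch only: assumes the decl.h and fw.h definitions are in scope. */
static_assert(offsetof(struct mwifiex_ieee80211_mgmt, auth) ==
	      MWIFIEX_MGMT_HEADER_LEN,	/* 2+2+6+6+6+2+6 = 30 */
	      "FW 4-address mgmt header size mismatch");
static_assert(offsetof(struct mwifiex_auth, variable) ==
	      MWIFIEX_AUTH_BODY_LEN,	/* 2+2+2 = 6 */
	      "fixed auth body size mismatch");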
+
struct mwifiex_fw_chan_stats {
u8 chan_num;
u8 bandcfg;
@@ -803,6 +831,11 @@ struct mwifiex_ie_types_ssid_param_set {
u8 ssid[];
} __packed;
+struct mwifiex_ie_types_host_mlme {
+ struct mwifiex_ie_types_header header;
+ u8 host_mlme;
+} __packed;
+
struct mwifiex_ie_types_num_probes {
struct mwifiex_ie_types_header header;
__le16 num_probes;
@@ -906,6 +939,13 @@ struct mwifiex_ie_types_tdls_idle_timeout {
__le16 value;
} __packed;
+#define MWIFIEX_AUTHTYPE_SAE 6
+
+struct mwifiex_ie_types_sae_pwe_mode {
+ struct mwifiex_ie_types_header header;
+ u8 pwe[];
+} __packed;
+
struct mwifiex_ie_types_rsn_param_set {
struct mwifiex_ie_types_header header;
u8 rsn_ie[];
@@ -1587,7 +1627,7 @@ struct host_cmd_ds_802_11_scan_rsp {
struct host_cmd_ds_802_11_scan_ext {
u32 reserved;
- u8 tlv_buffer[1];
+ u8 tlv_buffer[];
} __packed;
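This one-byte-placeholder-to-flexible-array conversion is what lets the matching scan.c hunk later in this patch drop its "- 1" correction: sizeof() no longer counts a placeholder byte for the TLV area. A standalone illustration:

#include <stdio.h>
#include <stdint.h>

struct old_ext { uint32_t reserved; uint8_t tlv_buffer[1]; } __attribute__((packed));
struct new_ext { uint32_t reserved; uint8_t tlv_buffer[]; } __attribute__((packed));

int main(void)
{
	/* The [1] placeholder was counted by sizeof(), forcing the
	 * buffer-length math to subtract it back; a flexible array
	 * member contributes nothing to sizeof(). */
	printf("old sizeof = %zu\n", sizeof(struct old_ext));	/* 5 */
	printf("new sizeof = %zu\n", sizeof(struct new_ext));	/* 4 */
	return 0;
}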
struct mwifiex_ie_types_bss_mode {
@@ -2298,6 +2338,20 @@ struct host_cmd_ds_sta_configure {
u8 tlv_buffer[];
} __packed;
+struct mwifiex_ie_types_sta_flag {
+ struct mwifiex_ie_types_header header;
+ __le32 sta_flags;
+} __packed;
+
+struct host_cmd_ds_add_station {
+ __le16 action;
+ __le16 aid;
+ u8 peer_mac[ETH_ALEN];
+ __le32 listen_interval;
+ __le16 cap_info;
+ u8 tlv[];
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -2376,6 +2430,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_chan_region_cfg reg_cfg;
struct host_cmd_ds_pkt_aggr_ctrl pkt_aggr_ctrl;
struct host_cmd_ds_sta_configure sta_cfg;
+ struct host_cmd_ds_add_station sta_info;
} params;
} __packed;
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index c9c58419c37b..8b61e45cd667 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -81,6 +81,9 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
priv->bcn_avg_factor = DEFAULT_BCN_AVG_FACTOR;
priv->data_avg_factor = DEFAULT_DATA_AVG_FACTOR;
+ priv->auth_flag = 0;
+ priv->auth_alg = WLAN_AUTH_NONE;
+
priv->sec_info.wep_enabled = 0;
priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM;
priv->sec_info.encryption_mode = 0;
@@ -220,6 +223,9 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->cmd_resp_received = false;
adapter->event_received = false;
adapter->data_received = false;
+ adapter->assoc_resp_received = false;
+ adapter->priv_link_lost = NULL;
+ adapter->host_mlme_link_lost = false;
clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
@@ -362,15 +368,13 @@ static void mwifiex_invalidate_lists(struct mwifiex_adapter *adapter)
list_del(&adapter->bss_prio_tbl[i].bss_prio_head);
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- priv = adapter->priv[i];
- for (j = 0; j < MAX_NUM_TID; ++j)
- list_del(&priv->wmm.tid_tbl_ptr[j].ra_list);
- list_del(&priv->tx_ba_stream_tbl_ptr);
- list_del(&priv->rx_reorder_tbl_ptr);
- list_del(&priv->sta_list);
- list_del(&priv->auto_tdls_list);
- }
+ priv = adapter->priv[i];
+ for (j = 0; j < MAX_NUM_TID; ++j)
+ list_del(&priv->wmm.tid_tbl_ptr[j].ra_list);
+ list_del(&priv->tx_ba_stream_tbl_ptr);
+ list_del(&priv->rx_reorder_tbl_ptr);
+ list_del(&priv->sta_list);
+ list_del(&priv->auto_tdls_list);
}
}
@@ -419,13 +423,11 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
spin_lock_init(&adapter->mwifiex_cmd_lock);
spin_lock_init(&adapter->queue_lock);
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- priv = adapter->priv[i];
- spin_lock_init(&priv->wmm.ra_list_spinlock);
- spin_lock_init(&priv->curr_bcn_buf_lock);
- spin_lock_init(&priv->sta_list_spinlock);
- spin_lock_init(&priv->auto_tdls_lock);
- }
+ priv = adapter->priv[i];
+ spin_lock_init(&priv->wmm.ra_list_spinlock);
+ spin_lock_init(&priv->curr_bcn_buf_lock);
+ spin_lock_init(&priv->sta_list_spinlock);
+ spin_lock_init(&priv->auto_tdls_lock);
}
/* Initialize cmd_free_q */
@@ -449,8 +451,6 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
}
for (i = 0; i < adapter->priv_num; i++) {
- if (!adapter->priv[i])
- continue;
priv = adapter->priv[i];
for (j = 0; j < MAX_NUM_TID; ++j)
INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[j].ra_list);
@@ -500,31 +500,24 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
mwifiex_init_adapter(adapter);
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- priv = adapter->priv[i];
+ priv = adapter->priv[i];
- /* Initialize private structure */
- ret = mwifiex_init_priv(priv);
- if (ret)
- return -1;
- }
+ /* Initialize private structure */
+ ret = mwifiex_init_priv(priv);
+ if (ret)
+ return -1;
}
if (adapter->mfg_mode) {
adapter->hw_status = MWIFIEX_HW_STATUS_READY;
ret = -EINPROGRESS;
} else {
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- ret = mwifiex_sta_init_cmd(adapter->priv[i],
- first_sta, true);
- if (ret == -1)
- return -1;
-
- first_sta = false;
- }
-
-
+ ret = mwifiex_sta_init_cmd(adapter->priv[i],
+ first_sta, true);
+ if (ret == -1)
+ return -1;
+ first_sta = false;
}
}
@@ -631,13 +624,11 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
/* Clean up Tx/Rx queues and delete BSS priority table */
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- priv = adapter->priv[i];
+ priv = adapter->priv[i];
- mwifiex_clean_auto_tdls(priv);
- mwifiex_abort_cac(priv);
- mwifiex_free_priv(priv);
- }
+ mwifiex_clean_auto_tdls(priv);
+ mwifiex_abort_cac(priv);
+ mwifiex_free_priv(priv);
}
atomic_set(&adapter->tx_queued, 0);
diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
index e8825f302de8..516159b721d3 100644
--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
+++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
@@ -158,6 +158,11 @@ struct mwifiex_bss_info {
u8 bssid[ETH_ALEN];
};
+struct mwifiex_sta_info {
+ u8 peer_mac[ETH_ALEN];
+ struct station_parameters *params;
+};
+
#define MAX_NUM_TID 8
#define MAX_RX_WINSIZE 64
diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
index 9d98a1908dd6..6d8f1d1d7ca4 100644
--- a/drivers/net/wireless/marvell/mwifiex/join.c
+++ b/drivers/net/wireless/marvell/mwifiex/join.c
@@ -382,7 +382,9 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
struct mwifiex_ie_types_ss_param_set *ss_tlv;
struct mwifiex_ie_types_rates_param_set *rates_tlv;
struct mwifiex_ie_types_auth_type *auth_tlv;
+ struct mwifiex_ie_types_sae_pwe_mode *sae_pwe_tlv;
struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
+ struct mwifiex_ie_types_host_mlme *host_mlme_tlv;
u8 rates[MWIFIEX_SUPPORTED_RATES];
u32 rates_size;
u16 tmp_cap;
@@ -460,6 +462,24 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
pos += sizeof(auth_tlv->header) + le16_to_cpu(auth_tlv->header.len);
+ if (priv->sec_info.authentication_mode == WLAN_AUTH_SAE) {
+ auth_tlv->auth_type = cpu_to_le16(MWIFIEX_AUTHTYPE_SAE);
+ if (bss_desc->bcn_rsnx_ie &&
+ bss_desc->bcn_rsnx_ie->ieee_hdr.len &&
+ (bss_desc->bcn_rsnx_ie->data[0] &
+ WLAN_RSNX_CAPA_SAE_H2E)) {
+ sae_pwe_tlv =
+ (struct mwifiex_ie_types_sae_pwe_mode *)pos;
+ sae_pwe_tlv->header.type =
+ cpu_to_le16(TLV_TYPE_SAE_PWE_MODE);
+ sae_pwe_tlv->header.len =
+ cpu_to_le16(sizeof(sae_pwe_tlv->pwe[0]));
+ sae_pwe_tlv->pwe[0] = bss_desc->bcn_rsnx_ie->data[0];
+ pos += sizeof(sae_pwe_tlv->header) +
+ sizeof(sae_pwe_tlv->pwe[0]);
+ }
+ }
+
if (IS_SUPPORT_MULTI_BANDS(priv->adapter) &&
!(ISSUPP_11NENABLED(priv->adapter->fw_cap_info) &&
(!bss_desc->disable_11n) &&
@@ -491,6 +511,16 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
sizeof(struct mwifiex_chan_scan_param_set);
}
+ if (priv->adapter->host_mlme_enabled) {
+ host_mlme_tlv = (struct mwifiex_ie_types_host_mlme *)pos;
+ host_mlme_tlv->header.type = cpu_to_le16(TLV_TYPE_HOST_MLME);
+ host_mlme_tlv->header.len =
+ cpu_to_le16(sizeof(host_mlme_tlv->host_mlme));
+ host_mlme_tlv->host_mlme = 1;
+ pos += sizeof(host_mlme_tlv->header) +
+ sizeof(host_mlme_tlv->host_mlme);
+ }
+
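All TLVs appended to this command share the little-endian type/len header of struct mwifiex_ie_types_header. For illustration, the host MLME TLV built above would serialize as below; this is a standalone sketch assuming a little-endian host:

#include <stdio.h>
#include <stdint.h>

struct tlv_header {		/* mirrors mwifiex_ie_types_header */
	uint16_t type;		/* little-endian on the wire */
	uint16_t len;
} __attribute__((packed));

#define TLV_TYPE_HOST_MLME 0x0233	/* PROPRIETARY_TLV_BASE_ID (0x0100) + 307 */

int main(void)
{
	uint8_t buf[5] = {0};
	struct tlv_header *hdr = (struct tlv_header *)buf;

	hdr->type = TLV_TYPE_HOST_MLME;	/* assuming a little-endian host */
	hdr->len = 1;			/* one-byte host_mlme flag */
	buf[sizeof(*hdr)] = 1;		/* host_mlme = 1 */

	for (size_t i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");			/* 33 02 01 00 01 */
	return 0;
}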
if (!priv->wps.session_enable) {
if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
rsn_ie_len = mwifiex_append_rsn_ie_wpa_wpa2(priv, &pos);
@@ -641,7 +671,21 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
goto done;
}
- assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
+ if (adapter->host_mlme_enabled) {
+ struct ieee80211_mgmt *hdr;
+
+ hdr = (struct ieee80211_mgmt *)&resp->params;
+ if (!memcmp(hdr->bssid,
+ priv->attempted_bss_desc->mac_address,
+ ETH_ALEN))
+ assoc_rsp = (struct ieee_types_assoc_rsp *)
+ &hdr->u.assoc_resp;
+ else
+ assoc_rsp =
+ (struct ieee_types_assoc_rsp *)&resp->params;
+ } else {
+ assoc_rsp = (struct ieee_types_assoc_rsp *)&resp->params;
+ }
cap_info = le16_to_cpu(assoc_rsp->cap_info_bitmap);
status_code = le16_to_cpu(assoc_rsp->status_code);
@@ -680,6 +724,9 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
mwifiex_dbg(priv->adapter, ERROR,
"ASSOC_RESP: UNSPECIFIED failure\n");
}
+
+ if (priv->adapter->host_mlme_enabled)
+ priv->assoc_rsp_size = 0;
} else {
ret = status_code;
}
@@ -778,7 +825,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
priv->adapter->dbg.num_cmd_assoc_success++;
- mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: associated\n");
+ mwifiex_dbg(priv->adapter, MSG, "assoc: associated with %pM\n",
+ priv->attempted_bss_desc->mac_address);
/* Add the ra_list here for infra mode as there will be only 1 ra
always */
@@ -1491,6 +1539,20 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
if (!priv->media_connected)
return 0;
+ if (priv->adapter->host_mlme_enabled) {
+ priv->auth_flag = 0;
+ priv->auth_alg = WLAN_AUTH_NONE;
+ priv->host_mlme_reg = false;
+ priv->mgmt_frame_mask = 0;
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->mgmt_frame_mask, false)) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "could not unregister mgmt frame rx\n");
+ return -1;
+ }
+ }
+
switch (priv->bss_mode) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
@@ -1520,8 +1582,7 @@ void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv)
- mwifiex_deauthenticate(priv, NULL);
+ mwifiex_deauthenticate(priv, NULL);
}
}
EXPORT_SYMBOL_GPL(mwifiex_deauthenticate_all);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index d99127dc466e..96d1f6039fbc 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -127,10 +127,8 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
/* Free private structures */
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- mwifiex_free_curr_bcn(adapter->priv[i]);
- kfree(adapter->priv[i]);
- }
+ mwifiex_free_curr_bcn(adapter->priv[i]);
+ kfree(adapter->priv[i]);
}
if (adapter->nd_info) {
@@ -530,6 +528,11 @@ static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
destroy_workqueue(adapter->rx_workqueue);
adapter->rx_workqueue = NULL;
}
+
+ if (adapter->host_mlme_workqueue) {
+ destroy_workqueue(adapter->host_mlme_workqueue);
+ adapter->host_mlme_workqueue = NULL;
+ }
}
/*
@@ -802,6 +805,10 @@ mwifiex_bypass_tx_queue(struct mwifiex_private *priv,
"bypass txqueue; eth type %#x, mgmt %d\n",
ntohs(eth_hdr->h_proto),
mwifiex_is_skb_mgmt_frame(skb));
+ if (eth_hdr->h_proto == htons(ETH_P_PAE))
+ mwifiex_dbg(priv->adapter, MSG,
+ "key: send EAPOL to %pM\n",
+ eth_hdr->h_dest);
return true;
}
@@ -1162,7 +1169,7 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
}
for (i = 0; i < adapter->priv_num; i++) {
- if (!adapter->priv[i] || !adapter->priv[i]->netdev)
+ if (!adapter->priv[i]->netdev)
continue;
priv = adapter->priv[i];
p += sprintf(p, "\n[interface : \"%s\"]\n",
@@ -1201,7 +1208,7 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL);
if (debug_info) {
for (i = 0; i < adapter->priv_num; i++) {
- if (!adapter->priv[i] || !adapter->priv[i]->netdev)
+ if (!adapter->priv[i]->netdev)
continue;
priv = adapter->priv[i];
mwifiex_get_debug_info(priv, debug_info);
@@ -1384,6 +1391,35 @@ int is_command_pending(struct mwifiex_adapter *adapter)
return !is_cmd_pend_q_empty;
}
+/* This is the host MLME work queue function. It handles deferred
+ * host MLME operations: link-lost processing and association
+ * response delivery.
+ */
+static void mwifiex_host_mlme_work_queue(struct work_struct *work)
+{
+ struct mwifiex_adapter *adapter =
+ container_of(work, struct mwifiex_adapter, host_mlme_work);
+
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
+ return;
+
+ /* Check for host MLME disconnection */
+ if (adapter->host_mlme_link_lost) {
+ if (adapter->priv_link_lost) {
+ mwifiex_reset_connect_state(adapter->priv_link_lost,
+ WLAN_REASON_DEAUTH_LEAVING,
+ true);
+ adapter->priv_link_lost = NULL;
+ }
+ adapter->host_mlme_link_lost = false;
+ }
+
+ /* Check for host MLME assoc resp */
+ if (adapter->assoc_resp_received) {
+ mwifiex_process_assoc_resp(adapter);
+ adapter->assoc_resp_received = false;
+ }
+}
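The worker consumes simple flags that the event paths set before queueing work, keeping the cfg80211 calls out of the event path. A minimal userspace sketch of that handoff, with hypothetical names and no real locking shown:

#include <stdbool.h>
#include <stdio.h>

struct demo_adapter {
	bool assoc_resp_received;
	bool link_lost;
};

/* Event path: record what happened; the driver then queue_work()s. */
static void demo_event(struct demo_adapter *a)
{
	a->assoc_resp_received = true;
}

/* Worker: runs in process context, safe to report to cfg80211. */
static void demo_worker(struct demo_adapter *a)
{
	if (a->link_lost) {
		puts("report disconnect");
		a->link_lost = false;
	}
	if (a->assoc_resp_received) {
		puts("deliver assoc resp");
		a->assoc_resp_received = false;
	}
}

int main(void)
{
	struct demo_adapter a = { 0 };

	demo_event(&a);
	demo_worker(&a);	/* prints "deliver assoc resp" */
	return 0;
}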
+
/*
* This is the RX work queue function.
*
@@ -1434,7 +1470,7 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
/* Stop data */
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv && priv->netdev) {
+ if (priv->netdev) {
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
@@ -1459,8 +1495,6 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (!priv)
- continue;
rtnl_lock();
if (priv->netdev &&
priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED) {
@@ -1558,6 +1592,18 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue);
}
+ if (adapter->host_mlme_enabled) {
+ adapter->host_mlme_workqueue =
+ alloc_workqueue("MWIFIEX_HOST_MLME_WORK_QUEUE",
+ WQ_HIGHPRI |
+ WQ_MEM_RECLAIM |
+ WQ_UNBOUND, 0);
+ if (!adapter->host_mlme_workqueue)
+ goto err_kmalloc;
+ INIT_WORK(&adapter->host_mlme_work,
+ mwifiex_host_mlme_work_queue);
+ }
+
/* Register the device. Fill up the private data structure with
* relevant information from the card. Some code extracted from
* mwifiex_register_dev()
@@ -1721,6 +1767,18 @@ mwifiex_add_card(void *card, struct completion *fw_done,
goto err_registerdev;
}
+ if (adapter->host_mlme_enabled) {
+ adapter->host_mlme_workqueue =
+ alloc_workqueue("MWIFIEX_HOST_MLME_WORK_QUEUE",
+ WQ_HIGHPRI |
+ WQ_MEM_RECLAIM |
+ WQ_UNBOUND, 0);
+ if (!adapter->host_mlme_workqueue)
+ goto err_kmalloc;
+ INIT_WORK(&adapter->host_mlme_work,
+ mwifiex_host_mlme_work_queue);
+ }
+
if (mwifiex_init_hw_fw(adapter, true)) {
pr_err("%s: firmware init failed\n", __func__);
goto err_init_fw;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index c5164ae41b54..566adce3413c 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -424,6 +424,8 @@ struct mwifiex_bssdescriptor {
u16 wpa_offset;
struct ieee_types_generic *bcn_rsn_ie;
u16 rsn_offset;
+ struct ieee_types_generic *bcn_rsnx_ie;
+ u16 rsnx_offset;
struct ieee_types_generic *bcn_wapi_ie;
u16 wapi_offset;
u8 *beacon_buf;
@@ -525,6 +527,8 @@ struct mwifiex_private {
u8 bss_priority;
u8 bss_num;
u8 bss_started;
+ u8 auth_flag;
+ u16 auth_alg;
u8 frame_type;
u8 curr_addr[ETH_ALEN];
u8 media_connected;
@@ -608,6 +612,7 @@ struct mwifiex_private {
#define MWIFIEX_ASSOC_RSP_BUF_SIZE 500
u8 assoc_rsp_buf[MWIFIEX_ASSOC_RSP_BUF_SIZE];
u32 assoc_rsp_size;
+ struct cfg80211_bss *req_bss;
#define MWIFIEX_GENIE_BUF_SIZE 256
u8 gen_ie_buf[MWIFIEX_GENIE_BUF_SIZE];
@@ -647,6 +652,7 @@ struct mwifiex_private {
u16 gen_idx;
u8 ap_11n_enabled;
u8 ap_11ac_enabled;
+ bool host_mlme_reg;
u32 mgmt_frame_mask;
struct mwifiex_roc_cfg roc_cfg;
bool scan_aborting;
@@ -793,7 +799,7 @@ struct mwifiex_auto_tdls_peer {
u8 mac_addr[ETH_ALEN];
u8 tdls_status;
int rssi;
- long rssi_jiffies;
+ unsigned long rssi_jiffies;
u8 failure_count;
u8 do_discover;
u8 do_setup;
@@ -876,6 +882,8 @@ struct mwifiex_adapter {
struct work_struct main_work;
struct workqueue_struct *rx_workqueue;
struct work_struct rx_work;
+ struct workqueue_struct *host_mlme_workqueue;
+ struct work_struct host_mlme_work;
bool rx_work_enabled;
bool rx_processing;
bool delay_main_work;
@@ -907,6 +915,9 @@ struct mwifiex_adapter {
u8 cmd_resp_received;
u8 event_received;
u8 data_received;
+ u8 assoc_resp_received;
+ struct mwifiex_private *priv_link_lost;
+ u8 host_mlme_link_lost;
u16 seq_num;
struct cmd_ctrl_node *cmd_pool;
struct cmd_ctrl_node *curr_cmd;
@@ -996,6 +1007,8 @@ struct mwifiex_adapter {
bool is_up;
bool ext_scan;
+ bool host_mlme_enabled;
+ struct ieee80211_txrx_stypes mwifiex_mgmt_stypes[NUM_NL80211_IFTYPES];
u8 fw_api_ver;
u8 key_api_major_ver, key_api_minor_ver;
u8 max_p2p_conn, max_sta_conn;
@@ -1061,6 +1074,9 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb);
int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
struct sk_buff *skb);
+void mwifiex_host_mlme_disconnect(struct mwifiex_private *priv,
+ u16 reason_code, u8 *sa);
+
int mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
struct sk_buff *skb);
@@ -1092,6 +1108,7 @@ void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter);
int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter);
+void mwifiex_process_assoc_resp(struct mwifiex_adapter *adapter);
int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
struct sk_buff *skb);
int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
@@ -1286,14 +1303,12 @@ mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter,
int i;
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED)
- continue;
+ if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED)
+ continue;
- if ((adapter->priv[i]->bss_num == bss_num) &&
- (adapter->priv[i]->bss_type == bss_type))
- break;
- }
+ if ((adapter->priv[i]->bss_num == bss_num) &&
+ (adapter->priv[i]->bss_type == bss_type))
+ break;
}
return ((i < adapter->priv_num) ? adapter->priv[i] : NULL);
}
@@ -1309,11 +1324,9 @@ mwifiex_get_priv(struct mwifiex_adapter *adapter,
int i;
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- if (bss_role == MWIFIEX_BSS_ROLE_ANY ||
- GET_BSS_ROLE(adapter->priv[i]) == bss_role)
- break;
- }
+ if (bss_role == MWIFIEX_BSS_ROLE_ANY ||
+ GET_BSS_ROLE(adapter->priv[i]) == bss_role)
+ break;
}
return ((i < adapter->priv_num) ? adapter->priv[i] : NULL);
@@ -1331,12 +1344,10 @@ mwifiex_get_unused_bss_num(struct mwifiex_adapter *adapter, u8 bss_type)
memset(index, 0, sizeof(index));
for (i = 0; i < adapter->priv_num; i++)
- if (adapter->priv[i]) {
- if (adapter->priv[i]->bss_type == bss_type &&
- !(adapter->priv[i]->bss_mode ==
- NL80211_IFTYPE_UNSPECIFIED)) {
- index[adapter->priv[i]->bss_num] = 1;
- }
+ if (adapter->priv[i]->bss_type == bss_type &&
+ !(adapter->priv[i]->bss_mode ==
+ NL80211_IFTYPE_UNSPECIFIED)) {
+ index[adapter->priv[i]->bss_num] = 1;
}
for (j = 0; j < MWIFIEX_MAX_BSS_NUM; j++)
if (!index[j])
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 0326b121747c..cab889af4c4a 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1371,6 +1371,12 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
bss_entry->rsn_offset = (u16) (current_ptr -
bss_entry->beacon_buf);
break;
+ case WLAN_EID_RSNX:
+ bss_entry->bcn_rsnx_ie =
+ (struct ieee_types_generic *)current_ptr;
+ bss_entry->rsnx_offset =
+ (u16)(current_ptr - bss_entry->beacon_buf);
+ break;
case WLAN_EID_BSS_AC_ACCESS_DELAY:
bss_entry->bcn_wapi_ie =
(struct ieee_types_generic *) current_ptr;
@@ -2045,8 +2051,6 @@ void mwifiex_cancel_scan(struct mwifiex_adapter *adapter)
spin_unlock_bh(&adapter->mwifiex_cmd_lock);
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (!priv)
- continue;
if (priv->scan_request) {
struct cfg80211_scan_info info = {
.aborted = true,
@@ -2530,8 +2534,7 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
ext_scan_resp = &resp->params.ext_scan;
tlv = (void *)ext_scan_resp->tlv_buffer;
- buf_left = le16_to_cpu(resp->size) - (sizeof(*ext_scan_resp) + S_DS_GEN
- - 1);
+ buf_left = le16_to_cpu(resp->size) - (sizeof(*ext_scan_resp) + S_DS_GEN);
while (buf_left >= sizeof(struct mwifiex_ie_types_header)) {
type = le16_to_cpu(tlv->type);
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index bda9b2b8a1f3..490ffd981164 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -332,6 +332,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
.can_auto_tdls = false,
.can_ext_scan = false,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -348,6 +349,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
.can_auto_tdls = false,
.can_ext_scan = true,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -364,6 +366,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
.can_auto_tdls = false,
.can_ext_scan = true,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -380,6 +383,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
.can_auto_tdls = false,
.can_ext_scan = true,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
@@ -397,6 +401,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
.can_auto_tdls = false,
.can_ext_scan = true,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8978 = {
@@ -414,6 +419,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8978 = {
.can_auto_tdls = false,
.can_ext_scan = true,
.fw_ready_extra_delay = true,
+ .host_mlme = true,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
@@ -432,6 +438,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
.can_auto_tdls = false,
.can_ext_scan = true,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
@@ -448,6 +455,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
.can_auto_tdls = true,
.can_ext_scan = true,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
@@ -465,6 +473,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
.can_auto_tdls = true,
.can_ext_scan = true,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
@@ -481,6 +490,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
.can_auto_tdls = false,
.can_ext_scan = true,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static struct memory_type_mapping generic_mem_type_map[] = {
@@ -574,6 +584,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
card->can_auto_tdls = data->can_auto_tdls;
card->can_ext_scan = data->can_ext_scan;
card->fw_ready_extra_delay = data->fw_ready_extra_delay;
+ card->host_mlme = data->host_mlme;
INIT_WORK(&card->work, mwifiex_sdio_work);
}
@@ -2511,6 +2522,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
}
+ adapter->host_mlme_enabled = card->host_mlme;
+
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index cb63ad55d675..65d142286c46 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -256,6 +256,7 @@ struct sdio_mmc_card {
bool can_auto_tdls;
bool can_ext_scan;
bool fw_ready_extra_delay;
+ bool host_mlme;
struct mwifiex_sdio_mpa_tx mpa_tx;
struct mwifiex_sdio_mpa_rx mpa_rx;
@@ -280,6 +281,7 @@ struct mwifiex_sdio_device {
bool can_auto_tdls;
bool can_ext_scan;
bool fw_ready_extra_delay;
+ bool host_mlme;
};
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 7b69d27e0c0e..9c53825f222d 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -1398,6 +1398,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_UAP_STA_DEAUTH:
break;
+ case HostCmd_CMD_ADD_NEW_STATION:
+ break;
case HOST_CMD_APCMD_SYS_RESET:
break;
case HostCmd_CMD_MEF_CFG:
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index df9cdd10a494..b5f3821a6a8f 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -135,6 +135,9 @@ void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code,
priv->media_connected = false;
+ priv->auth_flag = 0;
+ priv->auth_alg = WLAN_AUTH_NONE;
+
priv->scan_block = false;
priv->port_open = false;
@@ -222,8 +225,12 @@ void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code,
priv->cfg_bssid, reason_code);
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
- cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
- !from_ap, GFP_KERNEL);
+ if (adapter->host_mlme_enabled && adapter->host_mlme_link_lost)
+ mwifiex_host_mlme_disconnect(adapter->priv_link_lost,
+ reason_code, NULL);
+ else
+ cfg80211_disconnected(priv->netdev, reason_code, NULL,
+ 0, !from_ap, GFP_KERNEL);
}
eth_zero_addr(priv->cfg_bssid);
@@ -746,7 +753,15 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
if (priv->media_connected) {
reason_code =
get_unaligned_le16(adapter->event_body);
- mwifiex_reset_connect_state(priv, reason_code, true);
+ if (adapter->host_mlme_enabled) {
+ adapter->priv_link_lost = priv;
+ adapter->host_mlme_link_lost = true;
+ queue_work(adapter->host_mlme_workqueue,
+ &adapter->host_mlme_work);
+ } else {
+ mwifiex_reset_connect_state(priv, reason_code,
+ true);
+ }
}
break;
@@ -999,10 +1014,17 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_REMAIN_ON_CHAN_EXPIRED:
mwifiex_dbg(adapter, EVENT,
"event: Remain on channel expired\n");
- cfg80211_remain_on_channel_expired(&priv->wdev,
- priv->roc_cfg.cookie,
- &priv->roc_cfg.chan,
- GFP_ATOMIC);
+
+ if (adapter->host_mlme_enabled &&
+ (priv->auth_flag & HOST_MLME_AUTH_PENDING)) {
+ priv->auth_flag = 0;
+ priv->auth_alg = WLAN_AUTH_NONE;
+ } else {
+ cfg80211_remain_on_channel_expired(&priv->wdev,
+ priv->roc_cfg.cookie,
+ &priv->roc_cfg.chan,
+ GFP_ATOMIC);
+ }
memset(&priv->roc_cfg, 0x00, sizeof(struct mwifiex_roc_cfg));
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 32a27fad7b79..d3cba6895f8c 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -339,7 +339,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
ret = mwifiex_associate(priv, bss_desc);
}
- if (bss)
+ if (bss && !priv->adapter->host_mlme_enabled)
cfg80211_put_bss(priv->adapter->wiphy, bss);
} else {
/* Adhoc mode */
@@ -503,8 +503,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
if (disconnect_on_suspend) {
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv)
- mwifiex_deauthenticate(priv, NULL);
+ mwifiex_deauthenticate(priv, NULL);
}
}
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
index 70c2790b8e35..9d0ef04ebe02 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
@@ -36,7 +36,7 @@ void mwifiex_process_sta_txpd(struct mwifiex_private *priv,
struct txpd *local_tx_pd;
struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
unsigned int pad;
- u16 pkt_type, pkt_offset;
+ u16 pkt_type, pkt_length, pkt_offset;
int hroom = adapter->intf_hdr_len;
pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
@@ -49,9 +49,10 @@ void mwifiex_process_sta_txpd(struct mwifiex_private *priv,
memset(local_tx_pd, 0, sizeof(struct txpd));
local_tx_pd->bss_num = priv->bss_num;
local_tx_pd->bss_type = priv->bss_type;
- local_tx_pd->tx_pkt_length = cpu_to_le16((u16)(skb->len -
- (sizeof(struct txpd) +
- pad)));
+ pkt_length = (u16)(skb->len - (sizeof(struct txpd) + pad));
+ if (pkt_type == PKT_TYPE_MGMT)
+ pkt_length -= MWIFIEX_MGMT_FRAME_HEADER_SIZE;
+ local_tx_pd->tx_pkt_length = cpu_to_le16(pkt_length);
local_tx_pd->priority = (u8) skb->priority;
local_tx_pd->pkt_delay_2ms =
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index 6c60621b6ccc..7823e67694e8 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -1439,8 +1439,8 @@ void mwifiex_check_auto_tdls(struct timer_list *t)
spin_lock_bh(&priv->auto_tdls_lock);
list_for_each_entry(tdls_peer, &priv->auto_tdls_list, list) {
- if ((jiffies - tdls_peer->rssi_jiffies) >
- (MWIFIEX_AUTO_TDLS_IDLE_TIME * HZ)) {
+ if (time_after(jiffies, tdls_peer->rssi_jiffies +
+ MWIFIEX_AUTO_TDLS_IDLE_TIME * HZ)) {
tdls_peer->rssi = 0;
tdls_peer->do_discover = true;
priv->check_tdls_tx = true;
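The switch to time_after() matters once jiffies wraps: a plain subtraction compares unsigned magnitudes, while the macro casts the difference to a signed type so ordering survives wraparound. A 32-bit demonstration, simplified from include/linux/jiffies.h:

#include <stdio.h>
#include <stdint.h>

/* Comparing via a signed difference stays correct across counter
 * wraparound, which the old "(jiffies - t) > TIMEOUT" test did not
 * guarantee. A 32-bit counter is assumed for the demonstration. */
#define time_after32(a, b) ((int32_t)((b) - (a)) < 0)

int main(void)
{
	uint32_t stamp = 0xfffffff0u;	/* recorded shortly before wrap */
	uint32_t now = 0x00000010u;	/* read shortly after wrap */

	printf("expired: %d\n", time_after32(now, stamp));	/* prints 1 */
	return 0;
}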
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 491e36611909..1c0ceac6b27f 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -46,31 +46,26 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
bss_config->key_mgmt_operation |= KEY_MGMT_ON_HOST;
+ bss_config->protocol = 0;
+ if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ bss_config->protocol |= PROTOCOL_WPA;
+ if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ bss_config->protocol |= PROTOCOL_WPA2;
+
+ bss_config->key_mgmt = 0;
for (i = 0; i < params->crypto.n_akm_suites; i++) {
switch (params->crypto.akm_suites[i]) {
case WLAN_AKM_SUITE_8021X:
- if (params->crypto.wpa_versions &
- NL80211_WPA_VERSION_1) {
- bss_config->protocol = PROTOCOL_WPA;
- bss_config->key_mgmt = KEY_MGMT_EAP;
- }
- if (params->crypto.wpa_versions &
- NL80211_WPA_VERSION_2) {
- bss_config->protocol |= PROTOCOL_WPA2;
- bss_config->key_mgmt = KEY_MGMT_EAP;
- }
+ bss_config->key_mgmt |= KEY_MGMT_EAP;
break;
case WLAN_AKM_SUITE_PSK:
- if (params->crypto.wpa_versions &
- NL80211_WPA_VERSION_1) {
- bss_config->protocol = PROTOCOL_WPA;
- bss_config->key_mgmt = KEY_MGMT_PSK;
- }
- if (params->crypto.wpa_versions &
- NL80211_WPA_VERSION_2) {
- bss_config->protocol |= PROTOCOL_WPA2;
- bss_config->key_mgmt = KEY_MGMT_PSK;
- }
+ bss_config->key_mgmt |= KEY_MGMT_PSK;
+ break;
+ case WLAN_AKM_SUITE_PSK_SHA256:
+ bss_config->key_mgmt |= KEY_MGMT_PSK_SHA256;
+ break;
+ case WLAN_AKM_SUITE_SAE:
+ bss_config->key_mgmt |= KEY_MGMT_SAE;
break;
default:
break;
@@ -751,6 +746,28 @@ mwifiex_cmd_uap_sys_config(struct host_cmd_ds_command *cmd, u16 cmd_action,
return 0;
}
+/* This function prepares the AP start-up command, appending the host
+ * MLME TLV when host MLME is enabled.
+ */
+static void mwifiex_cmd_uap_bss_start(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd)
+{
+ struct mwifiex_ie_types_host_mlme *tlv;
+ int size;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_UAP_BSS_START);
+ size = S_DS_GEN;
+
+ if (priv->adapter->host_mlme_enabled) {
+ tlv = (struct mwifiex_ie_types_host_mlme *)((u8 *)cmd + size);
+ tlv->header.type = cpu_to_le16(TLV_TYPE_HOST_MLME);
+ tlv->header.len = cpu_to_le16(sizeof(tlv->host_mlme));
+ tlv->host_mlme = 1;
+ size += sizeof(struct mwifiex_ie_types_host_mlme);
+ }
+
+ cmd->size = cpu_to_le16(size);
+}
+
/* This function prepares the AP-specific deauth command with the MAC
 * address supplied as a function parameter.
 */
@@ -768,6 +785,144 @@ static int mwifiex_cmd_uap_sta_deauth(struct mwifiex_private *priv,
return 0;
}
+/* This function prepares the AP-specific add/remove station command.
+ */
+static int mwifiex_cmd_uap_add_station(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, void *data_buf)
+{
+ struct host_cmd_ds_add_station *new_sta = &cmd->params.sta_info;
+ struct mwifiex_sta_info *add_sta = (struct mwifiex_sta_info *)data_buf;
+ struct station_parameters *params = add_sta->params;
+ struct mwifiex_sta_node *sta_ptr;
+ u8 *pos;
+ u8 qos_capa;
+ u16 header_len = sizeof(struct mwifiex_ie_types_header);
+ u16 tlv_len;
+ int size;
+ struct mwifiex_ie_types_data *tlv;
+ struct mwifiex_ie_types_sta_flag *sta_flag;
+ int i;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_ADD_NEW_STATION);
+ new_sta->action = cpu_to_le16(cmd_action);
+ size = sizeof(struct host_cmd_ds_add_station) + S_DS_GEN;
+
+ if (cmd_action == HostCmd_ACT_ADD_STA)
+ sta_ptr = mwifiex_add_sta_entry(priv, add_sta->peer_mac);
+ else
+ sta_ptr = mwifiex_get_sta_entry(priv, add_sta->peer_mac);
+
+ if (!sta_ptr)
+ return -1;
+
+ memcpy(new_sta->peer_mac, add_sta->peer_mac, ETH_ALEN);
+
+ if (cmd_action == HostCmd_ACT_REMOVE_STA) {
+ cmd->size = cpu_to_le16(size);
+ return 0;
+ }
+
+ new_sta->aid = cpu_to_le16(params->aid);
+ new_sta->listen_interval = cpu_to_le32(params->listen_interval);
+ new_sta->cap_info = cpu_to_le16(params->capability);
+
+ pos = new_sta->tlv;
+
+ if (params->sta_flags_set & NL80211_STA_FLAG_WME)
+ sta_ptr->is_wmm_enabled = 1;
+ sta_flag = (struct mwifiex_ie_types_sta_flag *)pos;
+ sta_flag->header.type = cpu_to_le16(TLV_TYPE_UAP_STA_FLAGS);
+ sta_flag->header.len = cpu_to_le16(sizeof(__le32));
+ sta_flag->sta_flags = cpu_to_le32(params->sta_flags_set);
+ pos += sizeof(struct mwifiex_ie_types_sta_flag);
+ size += sizeof(struct mwifiex_ie_types_sta_flag);
+
+ if (params->ext_capab_len) {
+ tlv = (struct mwifiex_ie_types_data *)pos;
+ tlv->header.type = cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
+ tlv_len = params->ext_capab_len;
+ tlv->header.len = cpu_to_le16(tlv_len);
+ memcpy(tlv->data, params->ext_capab, tlv_len);
+ pos += (header_len + tlv_len);
+ size += (header_len + tlv_len);
+ }
+
+ if (params->link_sta_params.supported_rates_len) {
+ tlv = (struct mwifiex_ie_types_data *)pos;
+ tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
+ tlv_len = params->link_sta_params.supported_rates_len;
+ tlv->header.len = cpu_to_le16(tlv_len);
+ memcpy(tlv->data,
+ params->link_sta_params.supported_rates, tlv_len);
+ pos += (header_len + tlv_len);
+ size += (header_len + tlv_len);
+ }
+
+ if (params->uapsd_queues || params->max_sp) {
+ tlv = (struct mwifiex_ie_types_data *)pos;
+ tlv->header.type = cpu_to_le16(WLAN_EID_QOS_CAPA);
+ tlv_len = sizeof(qos_capa);
+ tlv->header.len = cpu_to_le16(tlv_len);
+ qos_capa = params->uapsd_queues | (params->max_sp << 5);
+ memcpy(tlv->data, &qos_capa, tlv_len);
+ pos += (header_len + tlv_len);
+ size += (header_len + tlv_len);
+ sta_ptr->is_wmm_enabled = 1;
+ }
+
+ if (params->link_sta_params.ht_capa) {
+ tlv = (struct mwifiex_ie_types_data *)pos;
+ tlv->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
+ tlv_len = sizeof(struct ieee80211_ht_cap);
+ tlv->header.len = cpu_to_le16(tlv_len);
+ memcpy(tlv->data, params->link_sta_params.ht_capa, tlv_len);
+ pos += (header_len + tlv_len);
+ size += (header_len + tlv_len);
+ sta_ptr->is_11n_enabled = 1;
+ sta_ptr->max_amsdu =
+ le16_to_cpu(params->link_sta_params.ht_capa->cap_info) &
+ IEEE80211_HT_CAP_MAX_AMSDU ?
+ MWIFIEX_TX_DATA_BUF_SIZE_8K :
+ MWIFIEX_TX_DATA_BUF_SIZE_4K;
+ }
+
+ if (params->link_sta_params.vht_capa) {
+ tlv = (struct mwifiex_ie_types_data *)pos;
+ tlv->header.type = cpu_to_le16(WLAN_EID_VHT_CAPABILITY);
+ tlv_len = sizeof(struct ieee80211_vht_cap);
+ tlv->header.len = cpu_to_le16(tlv_len);
+ memcpy(tlv->data, params->link_sta_params.vht_capa, tlv_len);
+ pos += (header_len + tlv_len);
+ size += (header_len + tlv_len);
+ sta_ptr->is_11ac_enabled = 1;
+ }
+
+ if (params->link_sta_params.opmode_notif_used) {
+ tlv = (struct mwifiex_ie_types_data *)pos;
+ tlv->header.type = cpu_to_le16(WLAN_EID_OPMODE_NOTIF);
+ tlv_len = sizeof(u8);
+ tlv->header.len = cpu_to_le16(tlv_len);
+ memcpy(tlv->data, &params->link_sta_params.opmode_notif,
+ tlv_len);
+ pos += (header_len + tlv_len);
+ size += (header_len + tlv_len);
+ }
+
+ for (i = 0; i < MAX_NUM_TID; i++) {
+ if (sta_ptr->is_11n_enabled)
+ sta_ptr->ampdu_sta[i] =
+ priv->aggr_prio_tbl[i].ampdu_user;
+ else
+ sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
+ }
+
+ memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
+ cmd->size = cpu_to_le16(size);
+
+ return 0;
+}
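The QoS capability byte assembled above packs the U-APSD AC bitmap into bits 0-3 and the max SP length into bits 5-6, per the WMM QoS Info field. A worked example with hypothetical station parameters:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* QoS Info field (WMM): bits 0-3 are the per-AC U-APSD flags,
	 * bits 5-6 encode max SP length (0 = all, 1 = 2, 2 = 4, 3 = 6).
	 * Hypothetical station: all four ACs delivery-enabled, max SP
	 * of two frames. */
	uint8_t uapsd_queues = 0x0f, max_sp = 1;
	uint8_t qos_capa = uapsd_queues | (max_sp << 5);

	printf("qos_capa = 0x%02x\n", qos_capa);	/* 0x2f */
	return 0;
}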
+
/* This function prepares the AP specific commands before sending them
* to the firmware.
* This is a generic function which calls specific command preparation
@@ -785,6 +940,8 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
return -1;
break;
case HostCmd_CMD_UAP_BSS_START:
+ mwifiex_cmd_uap_bss_start(priv, cmd);
+ break;
case HostCmd_CMD_UAP_BSS_STOP:
case HOST_CMD_APCMD_SYS_RESET:
case HOST_CMD_APCMD_STA_LIST:
@@ -800,6 +957,11 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
data_buf))
return -1;
break;
+ case HostCmd_CMD_ADD_NEW_STATION:
+ if (mwifiex_cmd_uap_add_station(priv, cmd, cmd_action,
+ data_buf))
+ return -1;
+ break;
default:
mwifiex_dbg(priv->adapter, ERROR,
"PREP_CMD: unknown cmd %#x\n", cmd_no);
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 515e6db410f2..6085cd50970d 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -745,8 +745,6 @@ static void mwifiex_usb_port_resync(struct mwifiex_adapter *adapter)
if (adapter->usb_mc_status) {
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (!priv)
- continue;
if ((priv->bss_role == MWIFIEX_BSS_ROLE_UAP &&
!priv->bss_started) ||
(priv->bss_role == MWIFIEX_BSS_ROLE_STA &&
@@ -758,8 +756,6 @@ static void mwifiex_usb_port_resync(struct mwifiex_adapter *adapter)
} else {
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (!priv)
- continue;
if ((priv->bss_role == MWIFIEX_BSS_ROLE_UAP &&
priv->bss_started) ||
(priv->bss_role == MWIFIEX_BSS_ROLE_STA &&
@@ -770,8 +766,7 @@ static void mwifiex_usb_port_resync(struct mwifiex_adapter *adapter)
}
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv)
- priv->usb_port = active_port;
+ priv->usb_port = active_port;
}
for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) {
if (active_port == card->port[i].tx_data_ep)
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 745b1d925b21..42c04bf858da 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -370,6 +370,45 @@ mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len,
return 0;
}
+
+/* This function reports a deauth frame to the kernel via cfg80211. */
+void mwifiex_host_mlme_disconnect(struct mwifiex_private *priv,
+ u16 reason_code, u8 *sa)
+{
+ u8 frame_buf[100];
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)frame_buf;
+
+ memset(frame_buf, 0, sizeof(frame_buf));
+ mgmt->frame_control = cpu_to_le16(IEEE80211_STYPE_DEAUTH);
+ mgmt->duration = 0;
+ mgmt->seq_ctrl = 0;
+ mgmt->u.deauth.reason_code = cpu_to_le16(reason_code);
+
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
+ eth_broadcast_addr(mgmt->da);
+ memcpy(mgmt->sa,
+ priv->curr_bss_params.bss_descriptor.mac_address,
+ ETH_ALEN);
+ memcpy(mgmt->bssid, priv->cfg_bssid, ETH_ALEN);
+ priv->auth_flag = 0;
+ priv->auth_alg = WLAN_AUTH_NONE;
+ } else {
+ memcpy(mgmt->da, priv->curr_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sa, ETH_ALEN);
+ memcpy(mgmt->bssid, priv->curr_addr, ETH_ALEN);
+ }
+
+ if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) {
+ wiphy_lock(priv->wdev.wiphy);
+ cfg80211_rx_mlme_mgmt(priv->netdev, frame_buf, 26);
+ wiphy_unlock(priv->wdev.wiphy);
+ } else {
+ cfg80211_rx_mgmt(&priv->wdev,
+ priv->bss_chandef.chan->center_freq,
+ 0, frame_buf, 26, 0);
+ }
+}
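The hard-coded length 26 passed to cfg80211 above is the 24-byte 802.11 management header plus the 2-byte deauth reason code; a quick arithmetic check:

#include <stdio.h>

int main(void)
{
	/* 802.11 mgmt header: fc(2) + duration(2) + 3 addresses(18) +
	 * seq_ctrl(2) = 24; deauth body adds reason_code(2). The total
	 * matches the 26 passed to cfg80211_rx_mlme_mgmt()/
	 * cfg80211_rx_mgmt(). */
	int hdr = 2 + 2 + 3 * 6 + 2;

	printf("deauth frame length: %d\n", hdr + 2);	/* 26 */
	return 0;
}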
+
/*
* This function processes the received management packet and send it
* to the kernel.
@@ -417,6 +456,71 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
pkt_len -= ETH_ALEN;
rx_pd->rx_pkt_length = cpu_to_le16(pkt_len);
+ if (priv->host_mlme_reg &&
+ (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) &&
+ (ieee80211_is_auth(ieee_hdr->frame_control) ||
+ ieee80211_is_deauth(ieee_hdr->frame_control) ||
+ ieee80211_is_disassoc(ieee_hdr->frame_control))) {
+ if (ieee80211_is_auth(ieee_hdr->frame_control)) {
+ if (priv->auth_flag & HOST_MLME_AUTH_PENDING) {
+ if (priv->auth_alg != WLAN_AUTH_SAE) {
+ priv->auth_flag &=
+ ~HOST_MLME_AUTH_PENDING;
+ priv->auth_flag |=
+ HOST_MLME_AUTH_DONE;
+ }
+ } else {
+ return 0;
+ }
+
+ mwifiex_dbg(priv->adapter, MSG,
+ "auth: receive authentication from %pM\n",
+ ieee_hdr->addr3);
+ } else {
+ if (!priv->wdev.connected)
+ return 0;
+
+ if (ieee80211_is_deauth(ieee_hdr->frame_control)) {
+ mwifiex_dbg(priv->adapter, MSG,
+ "auth: receive deauth from %pM\n",
+ ieee_hdr->addr3);
+ priv->auth_flag = 0;
+ priv->auth_alg = WLAN_AUTH_NONE;
+ } else {
+ mwifiex_dbg
+ (priv->adapter, MSG,
+ "assoc: receive disassoc from %pM\n",
+ ieee_hdr->addr3);
+ }
+ }
+
+ cfg80211_rx_mlme_mgmt(priv->netdev, skb->data, pkt_len);
+ }
+
+ if (priv->adapter->host_mlme_enabled &&
+ (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)) {
+ if (ieee80211_is_auth(ieee_hdr->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "auth: receive auth from %pM\n",
+ ieee_hdr->addr2);
+ if (ieee80211_is_deauth(ieee_hdr->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "auth: receive deauth from %pM\n",
+ ieee_hdr->addr2);
+ if (ieee80211_is_disassoc(ieee_hdr->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "assoc: receive disassoc from %pM\n",
+ ieee_hdr->addr2);
+ if (ieee80211_is_assoc_req(ieee_hdr->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "assoc: receive assoc req from %pM\n",
+ ieee_hdr->addr2);
+ if (ieee80211_is_reassoc_req(ieee_hdr->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "assoc: receive reassoc req from %pM\n",
+ ieee_hdr->addr2);
+ }
+
cfg80211_rx_mgmt(&priv->wdev, priv->roc_cfg.chan.center_freq,
CAL_RSSI(rx_pd->snr, rx_pd->nf), skb->data, pkt_len,
0);
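The ieee80211_is_auth()/ieee80211_is_deauth()/ieee80211_is_disassoc() checks that gate both new blocks only compare the type/subtype bits of the frame-control word. A sketch of the auth case, mirroring the linux/ieee80211.h helper (is_auth_ex() is an illustrative name):

static inline bool is_auth_ex(__le16 fc)
{
	/* management frame type + authentication subtype */
	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
}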
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 8558995e8fc7..bcb61dab7dc8 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -454,8 +454,6 @@ int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (!priv)
- continue;
if (adapter->if_ops.is_port_ready &&
!adapter->if_ops.is_port_ready(priv))
continue;
@@ -477,8 +475,6 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; ++i) {
priv = adapter->priv[i];
- if (!priv)
- continue;
if (!priv->port_open &&
(priv->bss_mode != NL80211_IFTYPE_ADHOC))
continue;
@@ -1491,9 +1487,6 @@ void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; ++i) {
priv = adapter->priv[i];
- if (!priv)
- continue;
-
if (adapter->if_ops.is_port_ready &&
!adapter->if_ops.is_port_ready(priv))
continue;
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index b130e057370f..bab9ef37a1ab 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -587,6 +587,7 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv, char *fw_image,
}
struct mwl8k_cmd_pkt {
+ /* New members MUST be added within the __struct_group() macro below. */
__struct_group(mwl8k_cmd_pkt_hdr, hdr, __packed,
__le16 code;
__le16 length;
@@ -596,6 +597,8 @@ struct mwl8k_cmd_pkt {
);
char payload[];
} __packed;
+static_assert(offsetof(struct mwl8k_cmd_pkt, payload) == sizeof(struct mwl8k_cmd_pkt_hdr),
+ "struct member likely outside of __struct_group()");
/*
* Firmware loading.
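__struct_group() (from include/uapi/linux/stddef.h) emits the listed members twice: once as anonymous fields of the enclosing struct and once as a standalone tagged struct, which is what the static_assert above exploits to catch members added outside the group. A stripped-down sketch of the resulting layout, with illustrative _demo names:

struct cmd_hdr_demo {
	__le16 code;
	__le16 length;
} __packed;

struct cmd_pkt_demo {
	union {
		struct {			/* direct access: pkt.code */
			__le16 code;
			__le16 length;
		} __packed;
		struct cmd_hdr_demo hdr;	/* grouped access: pkt.hdr */
	};
	char payload[];
} __packed;

static_assert(offsetof(struct cmd_pkt_demo, payload) ==
	      sizeof(struct cmd_hdr_demo));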
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index bb291fe314fb..9d5561f44134 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -929,14 +929,19 @@ void mt76_update_survey(struct mt76_phy *phy)
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
-void mt76_set_channel(struct mt76_phy *phy)
+int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+ bool offchannel)
{
struct mt76_dev *dev = phy->dev;
- struct ieee80211_hw *hw = phy->hw;
- struct cfg80211_chan_def *chandef = &hw->conf.chandef;
- bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
int timeout = HZ / 5;
+ int ret;
+ cancel_delayed_work_sync(&phy->mac_work);
+
+ mutex_lock(&dev->mutex);
+ set_bit(MT76_RESET, &phy->state);
+
+ mt76_worker_disable(&dev->tx_worker);
wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
mt76_update_survey(phy);
@@ -946,14 +951,34 @@ void mt76_set_channel(struct mt76_phy *phy)
phy->chandef = *chandef;
phy->chan_state = mt76_channel_state(phy, chandef->chan);
+ phy->offchannel = offchannel;
if (!offchannel)
phy->main_chan = chandef->chan;
if (chandef->chan != phy->main_chan)
memset(phy->chan_state, 0, sizeof(*phy->chan_state));
+ mt76_worker_enable(&dev->tx_worker);
+
+ ret = dev->drv->set_channel(phy);
+
+ clear_bit(MT76_RESET, &phy->state);
+ mt76_worker_schedule(&dev->tx_worker);
+
+ mutex_unlock(&dev->mutex);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(mt76_set_channel);
+
+int mt76_update_channel(struct mt76_phy *phy)
+{
+ struct ieee80211_hw *hw = phy->hw;
+ struct cfg80211_chan_def *chandef = &hw->conf.chandef;
+ bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
+
+ return mt76_set_channel(phy, chandef, offchannel);
+}
+EXPORT_SYMBOL_GPL(mt76_update_channel);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
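mt76_set_channel() now owns the complete sequence (cancel mac_work, take dev->mutex, set MT76_RESET, drain TX, call the driver's new set_channel hook, then unwind), while mt76_update_channel() is a thin wrapper that fetches the chandef and off-channel flag from mac80211's hw->conf. A hedged usage sketch for a driver's .config path (config_ex() is illustrative):

static int config_ex(struct ieee80211_hw *hw, u32 changed)
{
	struct mt76_phy *phy = hw->priv;

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
		/* wraps mt76_set_channel(phy, &hw->conf.chandef, ...) */
		return mt76_update_channel(phy);

	return 0;
}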
@@ -1484,21 +1509,32 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
+ enum mt76_sta_event ev;
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE)
return mt76_sta_add(phy, vif, sta);
- if (old_state == IEEE80211_STA_AUTH &&
- new_state == IEEE80211_STA_ASSOC &&
- dev->drv->sta_assoc)
- dev->drv->sta_assoc(dev, vif, sta);
-
if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)
mt76_sta_remove(dev, vif, sta);
- return 0;
+ if (!dev->drv->sta_event)
+ return 0;
+
+ if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC)
+ ev = MT76_STA_EVENT_ASSOC;
+ else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED)
+ ev = MT76_STA_EVENT_AUTHORIZE;
+ else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH)
+ ev = MT76_STA_EVENT_DISASSOC;
+ else
+ return 0;
+
+ return dev->drv->sta_event(dev, vif, sta, ev);
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
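The new sta_event hook folds the old sta_assoc callback into a small transition map; any transition not listed below stays a no-op for drivers implementing sta_event(). In tabular form (sketch):

/*
 *   old state   ->  new state        sta_event argument
 *   ------------------------------------------------------
 *   AUTH        ->  ASSOC            MT76_STA_EVENT_ASSOC
 *   ASSOC       ->  AUTHORIZED       MT76_STA_EVENT_AUTHORIZE
 *   ASSOC       ->  AUTH             MT76_STA_EVENT_DISASSOC
 */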
@@ -1521,6 +1557,7 @@ void mt76_wcid_init(struct mt76_wcid *wcid)
{
INIT_LIST_HEAD(&wcid->tx_list);
skb_queue_head_init(&wcid->tx_pending);
+ skb_queue_head_init(&wcid->tx_offchannel);
INIT_LIST_HEAD(&wcid->list);
idr_init(&wcid->pktid);
@@ -1529,7 +1566,7 @@ EXPORT_SYMBOL_GPL(mt76_wcid_init);
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
- struct mt76_phy *phy = dev->phys[wcid->phy_idx];
+ struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
struct ieee80211_hw *hw;
struct sk_buff_head list;
struct sk_buff *skb;
@@ -1697,14 +1734,15 @@ int mt76_get_rate(struct mt76_dev *dev,
struct ieee80211_supported_band *sband,
int idx, bool cck)
{
+ bool is_2g = sband->band == NL80211_BAND_2GHZ;
int i, offset = 0, len = sband->n_bitrates;
if (cck) {
- if (sband != &dev->phy.sband_2g.sband)
+ if (!is_2g)
return 0;
idx &= ~BIT(2); /* short preamble */
- } else if (sband == &dev->phy.sband_2g.sband) {
+ } else if (is_2g) {
offset = 4;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c
index a8cafa39a56d..98da82b74094 100644
--- a/drivers/net/wireless/mediatek/mt76/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mcu.c
@@ -73,6 +73,8 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, bool wait_resp,
struct sk_buff **ret_skb)
{
+ unsigned int retry = 0;
+ struct sk_buff *orig_skb = NULL;
unsigned long expires;
int ret, seq;
@@ -81,6 +83,14 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
mutex_lock(&dev->mcu.mutex);
+ if (dev->mcu_ops->mcu_skb_prepare_msg) {
+ ret = dev->mcu_ops->mcu_skb_prepare_msg(dev, skb, cmd, &seq);
+ if (ret < 0)
+ goto out;
+ }
+
+retry:
+ orig_skb = skb_get(skb);
ret = dev->mcu_ops->mcu_skb_send_msg(dev, skb, cmd, &seq);
if (ret < 0)
goto out;
@@ -94,6 +104,14 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
do {
skb = mt76_mcu_get_response(dev, expires);
+ if (!skb && !test_bit(MT76_MCU_RESET, &dev->phy.state) &&
+ retry++ < dev->mcu_ops->max_retry) {
+ dev_err(dev->dev, "Retry message %08x (seq %d)\n",
+ cmd, seq);
+ skb = orig_skb;
+ goto retry;
+ }
+
ret = dev->mcu_ops->mcu_parse_response(dev, cmd, skb, seq);
if (!ret && ret_skb)
*ret_skb = skb;
@@ -101,7 +119,9 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
dev_kfree_skb(skb);
} while (ret == -EAGAIN);
+
out:
+ dev_kfree_skb(orig_skb);
mutex_unlock(&dev->mcu.mutex);
return ret;
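The retry path relies on skb reference counting: skb_get() takes an extra reference before mcu_skb_send_msg() consumes the buffer, so the identical payload can be resubmitted if no response arrives before the timeout. Condensed form of the pattern above (a sketch, not the literal mt76 code):

retry:
	orig_skb = skb_get(skb);	/* hold an extra reference */
	ret = dev->mcu_ops->mcu_skb_send_msg(dev, skb, cmd, &seq);

	skb = mt76_mcu_get_response(dev, expires);
	if (!skb && retry++ < dev->mcu_ops->max_retry) {
		skb = orig_skb;		/* payload kept alive by our ref */
		goto retry;
	}

	dev_kfree_skb(orig_skb);	/* drop the extra reference */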
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 4a58a78d5ed2..0b75a45ad2e8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -162,8 +162,8 @@ enum mt76_dfs_state {
struct mt76_queue_buf {
dma_addr_t addr;
- u16 len;
- bool skip_unmap;
+ u16 len:15,
+ skip_unmap:1;
};
struct mt76_tx_info {
@@ -230,11 +230,14 @@ struct mt76_queue {
};
struct mt76_mcu_ops {
+ unsigned int max_retry;
u32 headroom;
u32 tailroom;
int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
int len, bool wait_resp);
+ int (*mcu_skb_prepare_msg)(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, int *seq);
int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, int *seq);
int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
@@ -347,6 +350,7 @@ struct mt76_wcid {
u8 hw_key_idx2;
u8 sta:1;
+ u8 sta_disabled:1;
u8 amsdu:1;
u8 phy_idx:2;
u8 link_id:4;
@@ -361,6 +365,7 @@ struct mt76_wcid {
struct list_head tx_list;
struct sk_buff_head tx_pending;
+ struct sk_buff_head tx_offchannel;
struct list_head list;
struct idr pktid;
@@ -466,6 +471,12 @@ enum {
MT76_STATE_WED_RESET,
};
+enum mt76_sta_event {
+ MT76_STA_EVENT_ASSOC,
+ MT76_STA_EVENT_AUTHORIZE,
+ MT76_STA_EVENT_DISASSOC,
+};
+
struct mt76_hw_cap {
bool has_2ghz;
bool has_5ghz;
@@ -487,6 +498,7 @@ struct mt76_driver_ops {
u8 mcs_rates;
void (*update_survey)(struct mt76_phy *phy);
+ int (*set_channel)(struct mt76_phy *phy);
int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
@@ -511,8 +523,8 @@ struct mt76_driver_ops {
int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
- void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+ int (*sta_event)(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -768,6 +780,7 @@ struct mt76_phy {
struct cfg80211_chan_def chandef;
struct ieee80211_channel *main_chan;
+ bool offchannel;
struct mt76_channel_state *chan_state;
enum mt76_dfs_state dfs_state;
@@ -1370,7 +1383,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw,
enum ieee80211_frame_release_type reason,
bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
-void mt76_set_channel(struct mt76_phy *phy);
+int mt76_update_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
@@ -1484,6 +1497,8 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_entry *e);
+int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+ bool offchannel);
/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
index c223f7c19e6d..6457ee06bb5a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -107,7 +107,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
struct sk_buff *skb;
int i, nframes;
- if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (dev->mphy.offchannel)
return;
data.dev = dev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index ea017f22fff2..863e5770df51 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -29,7 +29,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
struct ieee80211_sta *sta;
struct mt7603_sta *msta;
struct mt76_wcid *wcid;
- u8 tid = 0, hwq = 0;
+ u8 qid, tid = 0, hwq = 0;
void *priv;
int idx;
u32 val;
@@ -57,7 +57,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
if (ieee80211_is_data_qos(hdr->frame_control)) {
tid = *ieee80211_get_qos_ctl(hdr) &
IEEE80211_QOS_CTL_TAG1D_MASK;
- u8 qid = tid_to_ac[tid];
+ qid = tid_to_ac[tid];
hwq = wmm_queue_map[qid];
skb_set_queue_mapping(skb, qid);
} else if (ieee80211_is_data(hdr->frame_control)) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
index d951cb81df83..f5a6b03bc61d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
@@ -181,6 +181,7 @@ int mt7603_eeprom_init(struct mt7603_dev *dev)
is_mt7688(dev))
dev->mphy.antenna_mask = 1;
+ dev->mphy.chainmask = dev->mphy.antenna_mask;
mt76_eeprom_override(&dev->mphy);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index 6c55c72f28a2..86617a3e4328 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -15,9 +15,10 @@ const struct mt76_driver_ops mt7603_drv_ops = {
.rx_poll_complete = mt7603_rx_poll_complete,
.sta_ps = mt7603_sta_ps,
.sta_add = mt7603_sta_add,
- .sta_assoc = mt7603_sta_assoc,
+ .sta_event = mt7603_sta_event,
.sta_remove = mt7603_sta_remove,
.update_survey = mt7603_update_channel,
+ .set_channel = mt7603_set_channel,
};
static void
@@ -456,11 +457,13 @@ mt7603_init_txpower(struct mt7603_dev *dev,
int target_power = eeprom[MT_EE_TX_POWER_0_START_2G + 2] & ~BIT(7);
u8 *rate_power = &eeprom[MT_EE_TX_POWER_CCK];
bool ext_pa = eeprom[MT_EE_NIC_CONF_0 + 1] & BIT(1);
+ u8 ext_pa_pwr;
int max_offset, cur_offset;
int i;
- if (ext_pa && is_mt7603(dev))
- target_power = eeprom[MT_EE_TX_POWER_TSSI_OFF] & ~BIT(7);
+ ext_pa_pwr = eeprom[MT_EE_TX_POWER_TSSI_OFF];
+ if (ext_pa && is_mt7603(dev) && ext_pa_pwr != 0 && ext_pa_pwr != 0xff)
+ target_power = ext_pa_pwr & ~BIT(7);
if (target_power & BIT(6))
target_power = -(target_power & GENMASK(5, 0));
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index f35fa643c0da..574f74ad325d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -133,30 +133,24 @@ void mt7603_init_edcca(struct mt7603_dev *dev)
mt7603_edcca_set_strict(dev, false);
}
-static int
-mt7603_set_channel(struct ieee80211_hw *hw, struct cfg80211_chan_def *def)
+int mt7603_set_channel(struct mt76_phy *mphy)
{
- struct mt7603_dev *dev = hw->priv;
+ struct mt7603_dev *dev = container_of(mphy->dev, struct mt7603_dev, mt76);
+ struct cfg80211_chan_def *def = &mphy->chandef;
+
u8 *rssi_data = (u8 *)dev->mt76.eeprom.data;
int idx, ret;
u8 bw = MT_BW_20;
bool failed = false;
- ieee80211_stop_queues(hw);
- cancel_delayed_work_sync(&dev->mphy.mac_work);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mphy.state);
-
mt7603_beacon_set_timer(dev, -1, 0);
- mt76_set_channel(&dev->mphy);
mt7603_mac_stop(dev);
if (def->width == NL80211_CHAN_WIDTH_40)
bw = MT_BW_40;
- dev->mphy.chandef = *def;
mt76_rmw_field(dev, MT_AGG_BWCR, MT_AGG_BWCR_BW, bw);
ret = mt7603_mcu_set_channel(dev);
if (ret) {
@@ -180,10 +174,6 @@ mt7603_set_channel(struct ieee80211_hw *hw, struct cfg80211_chan_def *def)
mt7603_mac_set_timing(dev);
mt7603_mac_start(dev);
- clear_bit(MT76_RESET, &dev->mphy.state);
-
- mt76_txq_schedule_all(&dev->mphy);
-
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
msecs_to_jiffies(MT7603_WATCHDOG_TIME));
@@ -199,17 +189,14 @@ mt7603_set_channel(struct ieee80211_hw *hw, struct cfg80211_chan_def *def)
mt7603_init_edcca(dev);
out:
- if (!(mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+ if (!mphy->offchannel)
mt7603_beacon_set_timer(dev, -1, dev->mt76.beacon_int);
- mutex_unlock(&dev->mt76.mutex);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
if (failed)
mt7603_mac_work(&dev->mphy.mac_work.work);
- ieee80211_wake_queues(hw);
-
return ret;
}
@@ -227,7 +214,7 @@ static int mt7603_set_sar_specs(struct ieee80211_hw *hw,
if (err)
return err;
- return mt7603_set_channel(hw, &mphy->chandef);
+ return mt76_update_channel(mphy);
}
static int
@@ -238,7 +225,7 @@ mt7603_config(struct ieee80211_hw *hw, u32 changed)
if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
IEEE80211_CONF_CHANGE_POWER))
- ret = mt7603_set_channel(hw, &hw->conf.chandef);
+ ret = mt76_update_channel(&dev->mphy);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
mutex_lock(&dev->mt76.mutex);
@@ -368,13 +355,19 @@ mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
return ret;
}
-void
-mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+int
+mt7603_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
- mt7603_wtbl_update_cap(dev, sta);
+ if (ev == MT76_STA_EVENT_ASSOC) {
+ mutex_lock(&dev->mt76.mutex);
+ mt7603_wtbl_update_cap(dev, sta);
+ mutex_unlock(&dev->mt76.mutex);
+ }
+
+ return 0;
}
void
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index 9e58df7042ad..55a034ccbacd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -213,6 +213,7 @@ void mt7603_mac_sta_poll(struct mt7603_dev *dev);
void mt7603_pse_client_reset(struct mt7603_dev *dev);
+int mt7603_set_channel(struct mt76_phy *mphy);
int mt7603_mcu_set_channel(struct mt7603_dev *dev);
int mt7603_mcu_set_eeprom(struct mt7603_dev *dev);
void mt7603_mcu_exit(struct mt7603_dev *dev);
@@ -245,8 +246,8 @@ void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-void mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+int mt7603_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index f7722f67db57..66ba3be27343 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -56,6 +56,9 @@ int mt7615_thermal_init(struct mt7615_dev *dev)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7615_%s",
wiphy_name(wiphy));
+ if (!name)
+ return -ENOMEM;
+
hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, dev,
mt7615_hwmon_groups);
return PTR_ERR_OR_ZERO(hwmon);
@@ -319,7 +322,7 @@ void mt7615_init_work(struct mt7615_dev *dev)
mt7615_mcu_set_eeprom(dev);
mt7615_mac_init(dev);
mt7615_phy_init(dev);
- mt7615_mcu_del_wtbl_all(dev);
+ mt76_connac_mcu_del_wtbl_all(&dev->mt76);
mt7615_check_offload_capability(dev);
}
EXPORT_SYMBOL_GPL(mt7615_init_work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 50e262c1622f..376975388007 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -282,19 +282,14 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
mt76_wcid_cleanup(&dev->mt76, &mvif->sta.wcid);
}
-int mt7615_set_channel(struct mt7615_phy *phy)
+int mt7615_set_channel(struct mt76_phy *mphy)
{
+ struct mt7615_phy *phy = mphy->priv;
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
int ret;
- cancel_delayed_work_sync(&phy->mt76->mac_work);
-
- mt7615_mutex_acquire(dev);
-
- set_bit(MT76_RESET, &phy->mt76->state);
-
- mt76_set_channel(phy->mt76);
+ mt76_connac_pm_wake(mphy, &dev->pm);
if (is_mt7615(&dev->mt76) && dev->flash_eeprom) {
ret = mt7615_mcu_apply_rx_dcoc(phy);
@@ -325,11 +320,8 @@ int mt7615_set_channel(struct mt7615_phy *phy)
phy->chfreq = mt76_rr(dev, MT_CHFREQ(ext_phy));
out:
- clear_bit(MT76_RESET, &phy->mt76->state);
+ mt76_connac_power_save_sched(mphy, &dev->pm);
- mt7615_mutex_release(dev);
-
- mt76_worker_schedule(&dev->mt76.tx_worker);
if (!mt76_testmode_enabled(phy->mt76)) {
unsigned long timeout = mt7615_get_macwork_timeout(dev);
@@ -339,6 +331,7 @@ out:
return ret;
}
+EXPORT_SYMBOL_GPL(mt7615_set_channel);
static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
@@ -425,11 +418,7 @@ static int mt7615_set_sar_specs(struct ieee80211_hw *hw,
if (mt7615_firmware_offload(phy->dev))
return mt76_connac_mcu_set_rate_txpower(phy->mt76);
- ieee80211_stop_queues(hw);
- err = mt7615_set_channel(phy);
- ieee80211_wake_queues(hw);
-
- return err;
+ return mt76_update_channel(phy->mt76);
}
static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
@@ -448,9 +437,7 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
mt7615_mutex_release(dev);
}
#endif
- ieee80211_stop_queues(hw);
- ret = mt7615_set_channel(phy);
- ieee80211_wake_queues(hw);
+ ret = mt76_update_channel(phy->mt76);
}
mt7615_mutex_acquire(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index d50d967828be..96e34277fece 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -394,7 +394,7 @@ mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb)
if (mt76_phy_dfs_state(mphy) < MT_DFS_STATE_CAC)
return;
- ieee80211_radar_detected(mphy->hw);
+ ieee80211_radar_detected(mphy->hw, NULL);
dev->hw_pattern++;
}
@@ -847,6 +847,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct wtbl_req_hdr *wtbl_hdr;
struct mt7615_sta *msta;
bool new_entry = true;
+ int conn_state;
int cmd, err;
msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
@@ -863,8 +864,9 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
else
mvif->sta_added = true;
}
+ conn_state = enable ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT;
mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, vif, link_sta,
- enable, new_entry);
+ conn_state, new_entry);
if (enable && sta)
mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0,
MT76_STA_INFO_STATE_ASSOC);
@@ -1878,16 +1880,6 @@ out:
sizeof(req), true);
}
-int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
-{
- struct wtbl_req_hdr req = {
- .operation = WTBL_RESET_ALL,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(WTBL_UPDATE),
- &req, sizeof(req), true);
-}
-
int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val)
{
struct {
@@ -2151,7 +2143,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ else if (phy->mt76->offchannel)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
NL80211_IFTYPE_AP))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index 87a956ea3ad7..dbb2c82407df 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -182,6 +182,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
+ .set_channel = mt7615_set_channel,
};
struct mt76_bus_ops *bus_ops;
struct ieee80211_ops *ops;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index a20322aae967..530da48ce3ea 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -399,7 +399,6 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct ieee80211_tx_rate *rates);
void mt7615_pm_wake_work(struct work_struct *work);
void mt7615_pm_power_save_work(struct work_struct *work);
-int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd);
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
const struct ieee80211_tx_queue_params *params);
@@ -457,7 +456,7 @@ void mt7615_roc_work(struct work_struct *work);
void mt7615_roc_timer(struct timer_list *timer);
void mt7615_init_txpower(struct mt7615_dev *dev,
struct ieee80211_supported_band *sband);
-int mt7615_set_channel(struct mt7615_phy *phy);
+int mt7615_set_channel(struct mt76_phy *mphy);
void mt7615_init_work(struct mt7615_dev *dev);
int mt7615_mcu_restart(struct mt76_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index 9692890ba51b..aebfc4576aa4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -87,6 +87,7 @@ static int mt7663s_probe(struct sdio_func *func,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
+ .set_channel = mt7615_set_channel,
};
static const struct mt76_bus_ops mt7663s_ops = {
.rr = mt76s_rr,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
index a3d1cfa729ed..03f5af84424b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
@@ -141,7 +141,7 @@ mt7615_tm_init(struct mt7615_phy *phy)
mt7615_mcu_set_sku_en(phy, phy->mt76->test.state == MT76_TM_STATE_OFF);
mutex_unlock(&dev->mt76.mutex);
- mt7615_set_channel(phy);
+ mt76_update_channel(phy->mt76);
mt7615_ops.configure_filter(phy->mt76->hw, 0, &total_flags, 0);
mutex_lock(&dev->mt76.mutex);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index 9335ca0776fe..5020af52c68c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -123,6 +123,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
+ .set_channel = mt7615_set_channel,
};
static struct mt76_bus_ops bus_ops = {
.rr = mt7663u_rr,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
index 5f132115ebfc..eb4765365b8c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
@@ -355,4 +355,11 @@ enum tx_port_idx {
MT_TX_PORT_IDX_MCU
};
+enum tx_frag_idx {
+ MT_TX_FRAG_NONE,
+ MT_TX_FRAG_FIRST,
+ MT_TX_FRAG_MID,
+ MT_TX_FRAG_LAST
+};
+
#endif /* __MT76_CONNAC2_MAC_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
index 353e66069840..db0c29e65185 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
@@ -28,8 +28,6 @@ enum {
#define MT_RXD0_MESH BIT(18)
#define MT_RXD0_MHCP BIT(19)
#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
-#define MT_RXD0_NORMAL_IP_SUM BIT(23)
-#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24)
#define MT_RXD0_SW_PKT_TYPE_MASK GENMASK(31, 16)
#define MT_RXD0_SW_PKT_TYPE_MAP 0x380F
@@ -80,6 +78,8 @@ enum {
#define MT_RXD3_NORMAL_BEACON_UC BIT(21)
#define MT_RXD3_NORMAL_CO_ANT BIT(22)
#define MT_RXD3_NORMAL_FCS_ERR BIT(24)
+#define MT_RXD3_NORMAL_IP_SUM BIT(26)
+#define MT_RXD3_NORMAL_UDP_TCP_SUM BIT(27)
#define MT_RXD3_NORMAL_VLAN2ETH BIT(31)
/* RXD DW4 */
@@ -197,6 +197,13 @@ enum tx_mgnt_type {
MT_TX_ADDBA,
};
+enum tx_frag_idx {
+ MT_TX_FRAG_NONE,
+ MT_TX_FRAG_FIRST,
+ MT_TX_FRAG_MID,
+ MT_TX_FRAG_LAST
+};
+
#define MT_CT_INFO_APPLY_TXD BIT(0)
#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
#define MT_CT_INFO_MGMT_FRAME BIT(2)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index b841bf628d02..a3db65254e37 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -391,6 +391,7 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
bool multicast = is_multicast_ether_addr(hdr->addr1);
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
__le16 fc = hdr->frame_control;
+ __le16 sc = hdr->seq_ctrl;
u8 fc_type, fc_stype;
u32 val;
@@ -432,6 +433,13 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
info->flags & IEEE80211_TX_CTL_USE_MINRATE)
val |= MT_TXD2_FIX_RATE;
+ if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
+ else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
+ else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
+
txwi[2] |= cpu_to_le32(val);
if (ieee80211_is_beacon(fc)) {
@@ -440,7 +448,7 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
}
if (info->flags & IEEE80211_TX_CTL_INJECTED) {
- u16 seqno = le16_to_cpu(hdr->seq_ctrl);
+ u16 seqno = le16_to_cpu(sc);
if (ieee80211_is_back_req(hdr->frame_control)) {
struct ieee80211_bar *bar;
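The new TXD2 fragment field is derived from two header bits: the more-fragments flag in frame_control and the fragment number in seq_ctrl. An equivalent helper form of the if/else chain above (tx_frag_idx_ex() is illustrative; the driver open-codes it):

static inline u32 tx_frag_idx_ex(__le16 fc, __le16 sc)
{
	bool more = ieee80211_has_morefrags(fc);
	bool first = ieee80211_is_first_frag(sc);

	if (more)
		return first ? MT_TX_FRAG_FIRST : MT_TX_FRAG_MID;
	/* no more fragments: either an unfragmented frame or the last one */
	return first ? MT_TX_FRAG_NONE : MT_TX_FRAG_LAST;
}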
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 4dce03ddbfa4..864246f94088 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -283,7 +283,7 @@ __mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
};
struct sk_buff *skb;
- if (wcid && !wcid->sta)
+ if (wcid && !wcid->sta && !wcid->sta_disabled)
hdr.muar_idx = 0xe;
mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
@@ -371,7 +371,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_omac_tlv);
void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta,
- bool enable, bool newly)
+ int conn_state, bool newly)
{
struct sta_rec_basic *basic;
struct tlv *tlv;
@@ -382,13 +382,9 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
basic = (struct sta_rec_basic *)tlv;
basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
- if (enable) {
- if (newly)
- basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
- basic->conn_state = CONN_STATE_PORT_SECURE;
- } else {
- basic->conn_state = CONN_STATE_DISCONNECT;
- }
+ if (newly && conn_state != CONN_STATE_DISCONNECT)
+ basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
+ basic->conn_state = conn_state;
if (!link_sta) {
basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
@@ -1051,15 +1047,18 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
struct wtbl_req_hdr *wtbl_hdr;
struct tlv *sta_wtbl;
struct sk_buff *skb;
+ int conn_state;
skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
+ conn_state = info->enable ? CONN_STATE_PORT_SECURE :
+ CONN_STATE_DISCONNECT;
link_sta = info->sta ? &info->sta->deflink : NULL;
if (info->sta || !info->offload_fw)
mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
- link_sta, info->enable,
+ link_sta, conn_state,
info->newly);
if (info->sta && info->enable)
mt76_connac_mcu_sta_tlv(phy, skb, info->sta,
@@ -2850,6 +2849,17 @@ int mt76_connac_mcu_restart(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_restart);
+int mt76_connac_mcu_del_wtbl_all(struct mt76_dev *dev)
+{
+ struct wtbl_req_hdr req = {
+ .operation = WTBL_RESET_ALL,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_EXT_CMD(WTBL_UPDATE),
+ &req, sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_del_wtbl_all);
+
int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
u8 rx_sel, u8 val)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 4242d436de26..1b0e80dfc346 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -115,21 +115,26 @@ struct mt76_connac2_mcu_uni_txd {
} __packed __aligned(4);
struct mt76_connac2_mcu_rxd {
- __le32 rxd[6];
+ /* New members MUST be added within the struct_group() macro below. */
+ struct_group_tagged(mt76_connac2_mcu_rxd_hdr, hdr,
+ __le32 rxd[6];
- __le16 len;
- __le16 pkt_type_id;
+ __le16 len;
+ __le16 pkt_type_id;
- u8 eid;
- u8 seq;
- u8 option;
- u8 rsv;
- u8 ext_eid;
- u8 rsv1[2];
- u8 s2d_index;
+ u8 eid;
+ u8 seq;
+ u8 option;
+ u8 rsv;
+ u8 ext_eid;
+ u8 rsv1[2];
+ u8 s2d_index;
+ );
u8 tlv[];
};
+static_assert(offsetof(struct mt76_connac2_mcu_rxd, tlv) == sizeof(struct mt76_connac2_mcu_rxd_hdr),
+ "struct member likely outside of struct_group_tagged()");
struct mt76_connac2_patch_hdr {
char build_date[16];
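Same pattern as the mwl8k change earlier in this series: struct_group_tagged() gives the fixed-size MCU RX descriptor its own struct type, so header-only consumers can size and copy it without touching the flexible tlv[] member. Hedged usage sketch (hdr_copy_ex() is illustrative):

static void hdr_copy_ex(const struct mt76_connac2_mcu_rxd *rxd, void *dst)
{
	/* copies exactly the grouped header, never the trailing TLVs */
	memcpy(dst, &rxd->hdr, sizeof(rxd->hdr));
}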
@@ -1898,7 +1903,7 @@ int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif);
void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta,
- bool enable, bool newly);
+ int state, bool newly);
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, void *sta_wtbl,
@@ -2032,6 +2037,7 @@ void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb,
void *sta_wtbl, void *wtbl_tlv);
int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter);
int mt76_connac_mcu_restart(struct mt76_dev *dev);
+int mt76_connac_mcu_del_wtbl_all(struct mt76_dev *dev);
int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
u8 rx_sel, u8 val);
int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index bcd24c9072ec..4de45a56812d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -10,7 +10,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "mt76x0.h"
#include "eeprom.h"
#include "../mt76x02_phy.h"
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index 07380cce8755..4aa2dcedc874 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -8,16 +8,15 @@
#include <linux/etherdevice.h>
#include "mt76x0.h"
-static void
-mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
+int mt76x0_set_channel(struct mt76_phy *mphy)
{
- cancel_delayed_work_sync(&dev->cal_work);
+ struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76);
+
mt76x02_pre_tbtt_enable(dev, false);
if (mt76_is_mmio(&dev->mt76))
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
- mt76_set_channel(&dev->mphy);
- mt76x0_phy_set_channel(dev, chandef);
+ mt76x0_phy_set_channel(dev, &mphy->chandef);
mt76x02_mac_cc_reset(dev);
mt76x02_edcca_init(dev);
@@ -28,8 +27,9 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
}
mt76x02_pre_tbtt_enable(dev, true);
- mt76_txq_schedule_all(&dev->mphy);
+ return 0;
}
+EXPORT_SYMBOL_GPL(mt76x0_set_channel);
int mt76x0_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar)
@@ -61,13 +61,10 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mt76.mutex);
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ mt76_update_channel(&dev->mphy);
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- mt76x0_set_channel(dev, &hw->conf.chandef);
- ieee80211_wake_queues(hw);
- }
+ mutex_lock(&dev->mt76.mutex);
if (changed & IEEE80211_CONF_CHANGE_POWER) {
struct mt76_phy *mphy = &dev->mphy;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
index 99dcb8feb9f7..50f755344968 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -49,6 +49,7 @@ void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset);
void mt76x0_mac_stop(struct mt76x02_dev *dev);
int mt76x0_config(struct ieee80211_hw *hw, u32 changed);
+int mt76x0_set_channel(struct mt76_phy *mphy);
int mt76x0_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 2ecee7c5c80d..1eb955f3ca13 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -159,6 +159,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
MT_DRV_SW_RX_AIRTIME,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
+ .set_channel = mt76x0_set_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
.rx_skb = mt76x02_queue_rx_skb,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 390f502e97f0..b031c500b741 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -217,6 +217,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
.drv_flags = MT_DRV_SW_RX_AIRTIME,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
+ .set_channel = mt76x0_set_channel,
.tx_prepare_skb = mt76x02u_tx_prepare_skb,
.tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index 024a5c0a5a57..7a07636d09c6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -630,7 +630,7 @@ static void mt76x02_dfs_tasklet(struct tasklet_struct *t)
radar_detected = mt76x02_dfs_check_detection(dev);
if (radar_detected) {
/* sw detector rx radar pattern */
- ieee80211_radar_detected(dev->mt76.hw);
+ ieee80211_radar_detected(dev->mt76.hw, NULL);
mt76x02_dfs_detector_reset(dev);
return;
@@ -658,7 +658,7 @@ static void mt76x02_dfs_tasklet(struct tasklet_struct *t)
/* hw detector rx radar pattern */
dfs_pd->stats[i].hw_pattern++;
- ieee80211_radar_detected(dev->mt76.hw);
+ ieee80211_radar_detected(dev->mt76.hw, NULL);
mt76x02_dfs_detector_reset(dev);
return;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
index 5d402cf2951c..a5e3392c0b48 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
@@ -4,7 +4,7 @@
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "mt76x02_eeprom.h"
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 35b7ebc2c9c6..4a49a3036a46 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -22,7 +22,7 @@ static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t)
struct sk_buff *skb;
int i;
- if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (dev->mphy.offchannel)
return;
__skb_queue_head_init(&data.q);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 29b9a15f8dbe..0e1ede9314d8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -188,10 +188,7 @@ static void mt76x02u_pre_tbtt_work(struct work_struct *work)
struct sk_buff *skb;
int nbeacons;
- if (!dev->mt76.beacon_mask)
- return;
-
- if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (!dev->mt76.beacon_mask || dev->mphy.offchannel)
return;
__skb_queue_head_init(&data.q);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index 1fe5f5a02f93..156b16c17b2b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -5,7 +5,7 @@
#include <linux/module.h>
#include <linux/of.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "mt76x2.h"
#include "eeprom.h"
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
index be1217329a77..f051721bb00e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -47,6 +47,8 @@ void mt76x2_phy_power_on(struct mt76x02_dev *dev);
void mt76x2_stop_hardware(struct mt76x02_dev *dev);
int mt76x2_eeprom_init(struct mt76x02_dev *dev);
int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel);
+int mt76x2e_set_channel(struct mt76_phy *phy);
+int mt76x2u_set_channel(struct mt76_phy *phy);
void mt76x2_phy_set_antenna(struct mt76x02_dev *dev);
int mt76x2_phy_start(struct mt76x02_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 30959746e924..67c9d1caa0bd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -25,6 +25,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
MT_DRV_SW_RX_AIRTIME,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
+ .set_channel = mt76x2e_set_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
.rx_skb = mt76x02_queue_rx_skb,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 6accea551319..eb70130d2711 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -32,33 +32,25 @@ mt76x2_stop(struct ieee80211_hw *hw, bool suspend)
mt76x2_stop_hardware(dev);
}
-static void
-mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
+int mt76x2e_set_channel(struct mt76_phy *phy)
{
- cancel_delayed_work_sync(&dev->cal_work);
+ struct mt76x02_dev *dev = container_of(phy->dev, struct mt76x02_dev, mt76);
+
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mphy.state);
-
- mt76_set_channel(&dev->mphy);
-
mt76x2_mac_stop(dev, true);
- mt76x2_phy_set_channel(dev, chandef);
+ mt76x2_phy_set_channel(dev, &phy->chandef);
mt76x02_mac_cc_reset(dev);
mt76x02_dfs_init_params(dev);
mt76x2_mac_resume(dev);
- clear_bit(MT76_RESET, &dev->mphy.state);
- mutex_unlock(&dev->mt76.mutex);
-
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
- mt76_txq_schedule_all(&dev->mphy);
+ return 0;
}
static int
@@ -95,11 +87,8 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&dev->mt76.mutex);
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- mt76x2_set_channel(dev, &hw->conf.chandef);
- ieee80211_wake_queues(hw);
- }
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ mt76_update_channel(&dev->mphy);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index e92bb871f231..e832ad53e239 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -32,6 +32,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
.drv_flags = MT_DRV_SW_RX_AIRTIME,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
+ .set_channel = mt76x2u_set_channel,
.tx_prepare_skb = mt76x02u_tx_prepare_skb,
.tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index ba0241c36672..83e7061b10e2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -31,32 +31,20 @@ static void mt76x2u_stop(struct ieee80211_hw *hw, bool suspend)
mt76x2u_stop_hw(dev);
}
-static int
-mt76x2u_set_channel(struct mt76x02_dev *dev,
- struct cfg80211_chan_def *chandef)
+int mt76x2u_set_channel(struct mt76_phy *mphy)
{
+ struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76);
int err;
- cancel_delayed_work_sync(&dev->cal_work);
mt76x02_pre_tbtt_enable(dev, false);
-
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mphy.state);
-
- mt76_set_channel(&dev->mphy);
-
mt76x2_mac_stop(dev, false);
- err = mt76x2u_phy_set_channel(dev, chandef);
+ err = mt76x2u_phy_set_channel(dev, &mphy->chandef);
mt76x02_mac_cc_reset(dev);
mt76x2_mac_resume(dev);
- clear_bit(MT76_RESET, &dev->mphy.state);
- mutex_unlock(&dev->mt76.mutex);
-
mt76x02_pre_tbtt_enable(dev, true);
- mt76_txq_schedule_all(&dev->mphy);
return err;
}
@@ -93,11 +81,8 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&dev->mt76.mutex);
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- err = mt76x2u_set_channel(dev, &hw->conf.chandef);
- ieee80211_wake_queues(hw);
- }
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ mt76_update_channel(&dev->mphy);
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index a978f434dc5e..6bef96e3d2a3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -194,6 +194,8 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7915_%s",
wiphy_name(wiphy));
+ if (!name)
+ return -ENOMEM;
cdev = thermal_cooling_device_register(name, phy, &mt7915_thermal_ops);
if (!IS_ERR(cdev)) {
@@ -398,6 +400,7 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
hw->max_tx_fragments = 4;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 8008ce3fa6c7..cf77ce0c8759 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -1448,6 +1448,7 @@ mt7915_mac_full_reset(struct mt7915_dev *dev)
dev->recovery.hw_full_reset = true;
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
ieee80211_stop_queues(mt76_hw(dev));
if (ext_phy)
@@ -1462,26 +1463,27 @@ mt7915_mac_full_reset(struct mt7915_dev *dev)
if (!mt7915_mac_restart(dev))
break;
}
- mutex_unlock(&dev->mt76.mutex);
if (i == 10)
dev_err(dev->mt76.dev, "chip full reset failed\n");
- ieee80211_restart_hw(mt76_hw(dev));
- if (ext_phy)
- ieee80211_restart_hw(ext_phy->hw);
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ while (!list_empty(&dev->mt76.sta_poll_list))
+ list_del_init(dev->mt76.sta_poll_list.next);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
- ieee80211_wake_queues(mt76_hw(dev));
- if (ext_phy)
- ieee80211_wake_queues(ext_phy->hw);
+ memset(dev->mt76.wcid_mask, 0, sizeof(dev->mt76.wcid_mask));
+ dev->mt76.vif_mask = 0;
+ i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
+ dev->mt76.global_wcid.idx = i;
dev->recovery.hw_full_reset = false;
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
- MT7915_WATCHDOG_TIME);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ ieee80211_restart_hw(mt76_hw(dev));
if (ext_phy)
- ieee80211_queue_delayed_work(ext_phy->hw,
- &ext_phy->mac_work,
- MT7915_WATCHDOG_TIME);
+ ieee80211_restart_hw(ext_phy->hw);
}
/* system error recovery */
@@ -1537,12 +1539,14 @@ void mt7915_mac_reset_work(struct work_struct *work)
set_bit(MT76_RESET, &phy2->mt76->state);
cancel_delayed_work_sync(&phy2->mt76->mac_work);
}
+
+ mutex_lock(&dev->mt76.mutex);
+
mt76_worker_disable(&dev->mt76.tx_worker);
mt76_for_each_q_rx(&dev->mt76, i)
napi_disable(&dev->mt76.napi[i]);
napi_disable(&dev->mt76.tx_napi);
- mutex_lock(&dev->mt76.mutex);
if (mtk_wed_device_active(&dev->mt76.mmio.wed))
mtk_wed_device_stop(&dev->mt76.mmio.wed);
@@ -1692,6 +1696,11 @@ void mt7915_reset(struct mt7915_dev *dev)
return;
}
+ if ((READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA)) {
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ }
+
queue_work(dev->mt76.wq, &dev->reset_work);
wake_up(&dev->reset_wait);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 049223df9beb..d75e8dea1fbd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -245,7 +245,9 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
- idx = MT7915_WTBL_RESERVED - mvif->mt76.idx;
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, mt7915_wtbl_size(dev));
+ if (idx < 0)
+ return -ENOSPC;
INIT_LIST_HEAD(&mvif->sta.rc_list);
INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
@@ -272,7 +274,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
memset(&mvif->cap, -1, sizeof(mvif->cap));
mt7915_mcu_add_bss_info(phy, vif, true);
- mt7915_mcu_add_sta(dev, vif, NULL, true);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_PORT_SECURE, true);
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
out:
@@ -291,7 +293,8 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
int idx = msta->wcid.idx;
mt7915_mcu_add_bss_info(phy, vif, false);
- mt7915_mcu_add_sta(dev, vif, NULL, false);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_DISCONNECT, false);
+ mt76_wcid_mask_clear(dev->mt76.wcid_mask, mvif->sta.wcid.idx);
mutex_lock(&dev->mt76.mutex);
mt76_testmode_reset(phy->mt76, true);
@@ -317,18 +320,12 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
}
-int mt7915_set_channel(struct mt7915_phy *phy)
+int mt7915_set_channel(struct mt76_phy *mphy)
{
+ struct mt7915_phy *phy = mphy->priv;
struct mt7915_dev *dev = phy->dev;
int ret;
- cancel_delayed_work_sync(&phy->mt76->mac_work);
-
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &phy->mt76->state);
-
- mt76_set_channel(phy->mt76);
-
if (dev->cal) {
ret = mt7915_mcu_apply_tx_dpd(phy);
if (ret)
@@ -347,11 +344,6 @@ int mt7915_set_channel(struct mt7915_phy *phy)
phy->noise = 0;
out:
- clear_bit(MT76_RESET, &phy->mt76->state);
- mutex_unlock(&dev->mt76.mutex);
-
- mt76_txq_schedule_all(phy->mt76);
-
if (!mt76_testmode_enabled(phy->mt76))
ieee80211_queue_delayed_work(phy->mt76->hw,
&phy->mt76->mac_work,
@@ -374,6 +366,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
int idx = key->keyidx;
int err = 0;
+ if (sta && !wcid->sta)
+ return -EOPNOTSUPP;
+
/* The hardware does not support per-STA RX GTK, fallback
* to software mode for these.
*/
@@ -464,11 +459,9 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&dev->mt76.mutex);
}
#endif
- ieee80211_stop_queues(hw);
- ret = mt7915_set_channel(phy);
+ ret = mt76_update_channel(phy->mt76);
if (ret)
return ret;
- ieee80211_wake_queues(hw);
}
if (changed & (IEEE80211_CONF_CHANGE_POWER |
@@ -564,8 +557,7 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
MT_WF_RFCR_DROP_RTS |
- MT_WF_RFCR_DROP_CTL_RSV |
- MT_WF_RFCR_DROP_NDPA);
+ MT_WF_RFCR_DROP_CTL_RSV);
*total_flags = flags;
rxfilter = phy->rxfilter;
@@ -633,7 +625,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
if (set_bss_info == 1)
mt7915_mcu_add_bss_info(phy, vif, true);
if (set_sta == 1)
- mt7915_mcu_add_sta(dev, vif, NULL, true);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_PORT_SECURE, false);
if (changed & BSS_CHANGED_ERP_CTS_PROT)
mt7915_mac_enable_rtscts(dev, vif, info->use_cts_prot);
@@ -668,7 +660,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
if (set_bss_info == 0)
mt7915_mcu_add_bss_info(phy, vif, false);
if (set_sta == 0)
- mt7915_mcu_add_sta(dev, vif, NULL, false);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_DISCONNECT, false);
mutex_unlock(&dev->mt76.mutex);
}
@@ -706,7 +698,7 @@ mt7915_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
err = mt7915_mcu_add_bss_info(phy, vif, true);
if (err)
goto out;
- err = mt7915_mcu_add_sta(dev, vif, NULL, true);
+ err = mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_PORT_SECURE, false);
out:
mutex_unlock(&dev->mt76.mutex);
@@ -720,7 +712,7 @@ mt7915_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt7915_dev *dev = mt7915_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mt7915_mcu_add_sta(dev, vif, NULL, false);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_DISCONNECT, false);
mutex_unlock(&dev->mt76.mutex);
}
@@ -743,8 +735,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
bool ext_phy = mvif->phy != &dev->phy;
- int ret, idx;
- u32 addr;
+ int idx;
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
if (idx < 0)
@@ -753,25 +744,61 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
INIT_LIST_HEAD(&msta->rc_list);
INIT_LIST_HEAD(&msta->wcid.poll_list);
msta->vif = mvif;
- msta->wcid.sta = 1;
+ msta->wcid.sta_disabled = 1;
msta->wcid.idx = idx;
msta->wcid.phy_idx = ext_phy;
- msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
msta->jiffies = jiffies;
ewma_avg_signal_init(&msta->avg_ack_signal);
mt7915_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_DISCONNECT, true);
- ret = mt7915_mcu_add_sta(dev, vif, sta, true);
- if (ret)
- return ret;
+ return 0;
+}
- addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 30);
- mt76_rmw_field(dev, addr, GENMASK(7, 0), 0xa0);
+int mt7915_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ int i, ret;
+ u32 addr;
+
+ switch (ev) {
+ case MT76_STA_EVENT_ASSOC:
+ ret = mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_CONNECT, true);
+ if (ret)
+ return ret;
+
+ addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 30);
+ mt76_rmw_field(dev, addr, GENMASK(7, 0), 0xa0);
+
+ ret = mt7915_mcu_add_rate_ctrl(dev, vif, sta, false);
+ if (ret)
+ return ret;
+
+ msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ msta->wcid.sta = 1;
+ msta->wcid.sta_disabled = 0;
+
+ return 0;
+
+ case MT76_STA_EVENT_AUTHORIZE:
+ return mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_PORT_SECURE, false);
- return mt7915_mcu_add_rate_ctrl(dev, vif, sta, false);
+ case MT76_STA_EVENT_DISASSOC:
+ for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
+ mt7915_mac_twt_teardown_flow(dev, msta, i);
+
+ mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_DISCONNECT, false);
+ msta->wcid.sta_disabled = 1;
+ msta->wcid.sta = 0;
+ return 0;
+ }
+
+ return 0;
}
void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
@@ -779,16 +806,10 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- int i;
-
- mt7915_mcu_add_sta(dev, vif, sta, false);
mt7915_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
- mt7915_mac_twt_teardown_flow(dev, msta, i);
-
spin_lock_bh(&mdev->sta_poll_lock);
if (!list_empty(&msta->wcid.poll_list))
list_del_init(&msta->wcid.poll_list);
@@ -896,22 +917,6 @@ mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static int
-mt7915_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
- IEEE80211_STA_NONE);
-}
-
-static int
-mt7915_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
- IEEE80211_STA_NOTEXIST);
-}
-
-static int
mt7915_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -1089,8 +1094,7 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
struct rate_info *txrate = &msta->wcid.rate;
struct rate_info rxrate = {};
- if (is_mt7915(&phy->dev->mt76) &&
- !mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
+ if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
sinfo->rxrate = rxrate;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
}
@@ -1164,6 +1168,10 @@ static void mt7915_sta_rc_update(struct ieee80211_hw *hw,
{
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_dev *dev = phy->dev;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+
+ if (!msta->wcid.sta)
+ return;
mt7915_sta_rc_work(&changed, sta);
ieee80211_queue_work(hw, &dev->rc_work);
@@ -1207,6 +1215,9 @@ static void mt7915_sta_set_4addr(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
+ if (!msta->wcid.sta)
+ return;
+
mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
@@ -1223,6 +1234,9 @@ static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ if (!msta->wcid.sta)
+ return;
+
mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
@@ -1579,6 +1593,12 @@ mt7915_twt_teardown_request(struct ieee80211_hw *hw,
}
static int
+mt7915_set_frag_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ return 0;
+}
+
+static int
mt7915_set_radar_background(struct ieee80211_hw *hw,
struct cfg80211_chan_def *chandef)
{
@@ -1660,6 +1680,17 @@ mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
}
#endif
+static void
+mt7915_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+
+ ieee80211_wake_queues(hw);
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
+ MT7915_WATCHDOG_TIME);
+}
+
const struct ieee80211_ops mt7915_ops = {
.add_chanctx = ieee80211_emulate_add_chanctx,
.remove_chanctx = ieee80211_emulate_remove_chanctx,
@@ -1676,8 +1707,7 @@ const struct ieee80211_ops mt7915_ops = {
.bss_info_changed = mt7915_bss_info_changed,
.start_ap = mt7915_start_ap,
.stop_ap = mt7915_stop_ap,
- .sta_add = mt7915_sta_add,
- .sta_remove = mt7915_sta_remove,
+ .sta_state = mt76_sta_state,
.sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.sta_rc_update = mt7915_sta_rc_update,
.set_key = mt7915_set_key,
@@ -1708,6 +1738,7 @@ const struct ieee80211_ops mt7915_ops = {
.sta_set_decap_offload = mt7915_sta_set_decap_offload,
.add_twt_setup = mt7915_mac_add_twt_setup,
.twt_teardown_request = mt7915_twt_teardown_request,
+ .set_frag_threshold = mt7915_set_frag_threshold,
CFG80211_TESTMODE_CMD(mt76_testmode_cmd)
CFG80211_TESTMODE_DUMP(mt76_testmode_dump)
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -1718,4 +1749,5 @@ const struct ieee80211_ops mt7915_ops = {
.net_fill_forward_path = mt7915_net_fill_forward_path,
.net_setup_tc = mt76_wed_net_setup_tc,
#endif
+ .reconfig_complete = mt7915_reconfig_complete,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 2185cd24e2e1..87d0dd040001 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -157,12 +157,21 @@ static int
mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq)
{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
struct mt76_connac2_mcu_rxd *rxd;
int ret = 0;
if (!skb) {
dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
cmd, seq);
+
+ if (!test_and_set_bit(MT76_MCU_RESET, &dev->mphy.state)) {
+ dev->recovery.restart = true;
+ wake_up(&dev->mt76.mcu.wait);
+ queue_work(dev->mt76.wq, &dev->reset_work);
+ wake_up(&dev->reset_wait);
+ }
+
return -ETIMEDOUT;
}
@@ -191,11 +200,6 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
enum mt76_mcuq_id qid;
- int ret;
-
- ret = mt76_connac2_mcu_fill_message(mdev, skb, cmd, wait_seq);
- if (ret)
- return ret;
if (cmd == MCU_CMD(FW_SCATTER))
qid = MT_MCUQ_FWDL;
@@ -293,7 +297,7 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
&dev->rdd2_chandef,
GFP_ATOMIC);
else
- ieee80211_radar_detected(mphy->hw);
+ ieee80211_radar_detected(mphy->hw, NULL);
dev->hw_pattern++;
}
@@ -690,13 +694,17 @@ int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
{
struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
struct mt7915_vif *mvif = msta->vif;
+ int ret;
+ mt76_worker_disable(&dev->mt76.tx_worker);
if (enable && !params->amsdu)
msta->wcid.amsdu = false;
+ ret = mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ MCU_EXT_CMD(STA_REC_UPDATE),
+ enable, true);
+ mt76_worker_enable(&dev->mt76.tx_worker);
- return mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
- MCU_EXT_CMD(STA_REC_UPDATE),
- enable, true);
+ return ret;
}
int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
@@ -1653,7 +1661,7 @@ mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif,
}
int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable)
+ struct ieee80211_sta *sta, int conn_state, bool newly)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct ieee80211_link_sta *link_sta;
@@ -1670,13 +1678,10 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
/* starec basic */
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta, enable,
- !rcu_access_pointer(dev->mt76.wcid[msta->wcid.idx]));
- if (!enable)
- goto out;
-
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta,
+ conn_state, newly);
/* tag order is in accordance with firmware dependency. */
- if (sta) {
+ if (sta && conn_state != CONN_STATE_DISCONNECT) {
/* starec bfer */
mt7915_mcu_sta_bfer_tlv(dev, skb, vif, sta);
/* starec ht */
@@ -1687,12 +1692,17 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
mt76_connac_mcu_sta_uapsd(skb, vif, sta);
}
- ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta);
- if (ret) {
- dev_kfree_skb(skb);
- return ret;
+ if (newly || conn_state != CONN_STATE_DISCONNECT) {
+ ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
}
+ if (conn_state == CONN_STATE_DISCONNECT)
+ goto out;
+
if (sta) {
/* starec amsdu */
mt7915_mcu_sta_amsdu_tlv(dev, skb, vif, sta);
@@ -2352,6 +2362,8 @@ int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
if (ret)
return ret;
+ mt76_connac_mcu_del_wtbl_all(&dev->mt76);
+
if ((mtk_wed_device_active(&dev->mt76.mmio.wed) &&
is_mt7915(&dev->mt76)) ||
!mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
@@ -2376,7 +2388,9 @@ int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
int mt7915_mcu_init(struct mt7915_dev *dev)
{
static const struct mt76_mcu_ops mt7915_mcu_ops = {
+ .max_retry = 3,
.headroom = sizeof(struct mt76_connac2_mcu_txd),
+ .mcu_skb_prepare_msg = mt76_connac2_mcu_fill_message,
.mcu_skb_send_msg = mt7915_mcu_send_message,
.mcu_parse_response = mt7915_mcu_parse_response,
};
@@ -2747,7 +2761,7 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL ||
+ else if (phy->mt76->offchannel ||
phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index b41ac4aaced7..49476a4182fd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -29,7 +29,7 @@ struct mt7915_mcu_thermal_ctrl {
} __packed;
struct mt7915_mcu_thermal_notify {
- struct mt76_connac2_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd_hdr rxd;
struct mt7915_mcu_thermal_ctrl ctrl;
__le32 temperature;
@@ -37,7 +37,7 @@ struct mt7915_mcu_thermal_notify {
} __packed;
struct mt7915_mcu_csa_notify {
- struct mt76_connac2_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd_hdr rxd;
u8 omac_idx;
u8 csa_count;
@@ -46,7 +46,7 @@ struct mt7915_mcu_csa_notify {
} __packed;
struct mt7915_mcu_bcc_notify {
- struct mt76_connac2_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd_hdr rxd;
u8 band_idx;
u8 omac_idx;
@@ -55,7 +55,7 @@ struct mt7915_mcu_bcc_notify {
} __packed;
struct mt7915_mcu_rdd_report {
- struct mt76_connac2_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd_hdr rxd;
u8 band_idx;
u8 long_detected;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index d6ecd698cdcd..44e112b8b5b3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -927,8 +927,10 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
.rx_check = mt7915_rx_check,
.rx_poll_complete = mt7915_rx_poll_complete,
.sta_add = mt7915_mac_sta_add,
+ .sta_event = mt7915_mac_sta_event,
.sta_remove = mt7915_mac_sta_remove,
.update_survey = mt7915_update_channel,
+ .set_channel = mt7915_set_channel,
};
struct mt7915_dev *dev;
struct mt76_dev *mdev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index a30d08eb0656..ac0b1f0eb27c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -444,7 +444,7 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
struct ieee80211_vif *vif, int enable);
int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable);
+ struct ieee80211_sta *sta, int conn_state, bool newly);
int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
@@ -463,7 +463,7 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool changed);
int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-int mt7915_set_channel(struct mt7915_phy *phy);
+int mt7915_set_channel(struct mt76_phy *mphy);
int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd);
int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif);
int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *req);
@@ -560,6 +560,8 @@ void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
void mt7915_mac_set_timing(struct mt7915_phy *phy);
int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+int mt7915_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7915_mac_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
index 0d76ae31b376..d534fff5c952 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
@@ -404,6 +404,7 @@ static void
mt7915_tm_init(struct mt7915_phy *phy, bool en)
{
struct mt7915_dev *dev = phy->dev;
+ int state;
if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
return;
@@ -415,7 +416,8 @@ mt7915_tm_init(struct mt7915_phy *phy, bool en)
mt7915_tm_set_trx(phy, TM_MAC_TXRX, !en);
mt7915_mcu_add_bss_info(phy, phy->monitor_vif, en);
- mt7915_mcu_add_sta(dev, phy->monitor_vif, NULL, en);
+ state = en ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT;
+ mt7915_mcu_add_sta(dev, phy->monitor_vif, NULL, state, true);
if (!en)
mt7915_tm_set_tam_arb(phy, en, 0);
@@ -425,7 +427,7 @@ static void
mt7915_tm_update_channel(struct mt7915_phy *phy)
{
mutex_unlock(&phy->dev->mt76.mutex);
- mt7915_set_channel(phy);
+ mt76_update_channel(phy->mt76);
mutex_lock(&phy->dev->mt76.mutex);
mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index ef0c721d26e3..d1d64fa7d35d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -52,6 +52,8 @@ static int mt7921_thermal_init(struct mt792x_phy *phy)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7921_%s",
wiphy_name(wiphy));
+ if (!name)
+ return -ENOMEM;
hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy,
mt7921_hwmon_groups);
@@ -83,7 +85,7 @@ mt7921_regd_channel_update(struct wiphy *wiphy, struct mt792x_dev *dev)
}
/* UNII-4 */
- if (IS_UNII_INVALID(0, 5850, 5925))
+ if (IS_UNII_INVALID(0, 5845, 5925))
ch->flags |= IEEE80211_CHAN_DISABLED;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 1bab93d049df..a7f5bfbc02ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -455,37 +455,30 @@ static int mt7921_cancel_remain_on_channel(struct ieee80211_hw *hw,
return mt7921_abort_roc(phy, mvif);
}
-static int mt7921_set_channel(struct mt792x_phy *phy)
+int mt7921_set_channel(struct mt76_phy *mphy)
{
+ struct mt792x_phy *phy = mphy->priv;
struct mt792x_dev *dev = phy->dev;
int ret;
- cancel_delayed_work_sync(&phy->mt76->mac_work);
-
- mt792x_mutex_acquire(dev);
- set_bit(MT76_RESET, &phy->mt76->state);
-
- mt76_set_channel(phy->mt76);
-
+ mt76_connac_pm_wake(mphy, &dev->pm);
ret = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD(CHANNEL_SWITCH));
if (ret)
goto out;
mt792x_mac_set_timeing(phy);
-
mt792x_mac_reset_counters(phy);
phy->noise = 0;
out:
- clear_bit(MT76_RESET, &phy->mt76->state);
- mt792x_mutex_release(dev);
+ mt76_connac_power_save_sched(mphy, &dev->pm);
- mt76_worker_schedule(&dev->mt76.tx_worker);
- ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mt76->mac_work,
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT792x_WATCHDOG_TIME);
return ret;
}
+EXPORT_SYMBOL_GPL(mt7921_set_channel);
static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
@@ -620,11 +613,9 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
int ret = 0;
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- ret = mt7921_set_channel(phy);
+ ret = mt76_update_channel(phy->mt76);
if (ret)
return ret;
- ieee80211_wake_queues(hw);
}
mt792x_mutex_acquire(dev);
@@ -831,13 +822,16 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt7921_mac_sta_add);
-void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+int mt7921_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
{
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ if (ev != MT76_STA_EVENT_ASSOC)
+ return 0;
+
mt792x_mutex_acquire(dev);
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
@@ -853,8 +847,10 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt7921_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC);
mt792x_mutex_release(dev);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(mt7921_mac_sta_assoc);
+EXPORT_SYMBOL_GPL(mt7921_mac_sta_event);
void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -1183,7 +1179,7 @@ static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw,
struct inet6_dev *idev)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct mt792x_dev *dev = mvif->phy->dev;
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
struct inet6_ifaddr *ifa;
struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
struct sk_buff *skb;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 394fcd799345..02c1de8620a7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -890,7 +890,7 @@ int mt7921_mcu_set_chan_info(struct mt792x_phy *phy, int cmd)
if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
- else if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ else if (phy->mt76->offchannel)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if (!cfg80211_reg_can_beacon(dev->mt76.hw->wiphy, chandef,
NL80211_IFTYPE_AP))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 6c5392c5d207..16c89815c0b8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -186,6 +186,7 @@ int __mt7921_start(struct mt792x_phy *phy);
int mt7921_register_device(struct mt792x_dev *dev);
void mt7921_unregister_device(struct mt792x_dev *dev);
int mt7921_run_firmware(struct mt792x_dev *dev);
+int mt7921_set_channel(struct mt76_phy *mphy);
int mt7921_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7921_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta,
@@ -244,8 +245,8 @@ int mt7921_mac_init(struct mt792x_dev *dev);
bool mt7921_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask);
int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+int mt7921_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7921_mac_reset_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index a7430216a80d..67723c22aea6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -244,9 +244,10 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.rx_skb = mt7921_queue_rx_skb,
.rx_poll_complete = mt792x_rx_poll_complete,
.sta_add = mt7921_mac_sta_add,
- .sta_assoc = mt7921_mac_sta_assoc,
+ .sta_event = mt7921_mac_sta_event,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt792x_update_channel,
+ .set_channel = mt7921_set_channel,
};
static const struct mt792x_hif_ops mt7921_pcie_ops = {
.init_reset = mt7921e_init_reset,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index 004d942ee11a..95f526f7bb99 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -100,9 +100,10 @@ static int mt7921s_probe(struct sdio_func *func,
.rx_skb = mt7921_queue_rx_skb,
.rx_check = mt7921_rx_check,
.sta_add = mt7921_mac_sta_add,
- .sta_assoc = mt7921_mac_sta_assoc,
+ .sta_event = mt7921_mac_sta_event,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt792x_update_channel,
+ .set_channel = mt7921_set_channel,
};
static const struct mt76_bus_ops mt7921s_ops = {
.rr = mt76s_rr,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index 8b7c03c47598..8aa4f0203208 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -151,9 +151,10 @@ static int mt7921u_probe(struct usb_interface *usb_intf,
.rx_skb = mt7921_queue_rx_skb,
.rx_check = mt7921_rx_check,
.sta_add = mt7921_mac_sta_add,
- .sta_assoc = mt7921_mac_sta_assoc,
+ .sta_event = mt7921_mac_sta_event,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt792x_update_channel,
+ .set_channel = mt7921_set_channel,
};
static const struct mt792x_hif_ops hif_ops = {
.mcu_init = mt7921u_mcu_init,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
index cf36750cf709..634c42bbf23f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
@@ -352,7 +352,7 @@ mt7925_mac_fill_rx_rate(struct mt792x_dev *dev,
static int
mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
{
- u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
+ u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
bool hdr_trans, unicast, insert_ccmp_hdr = false;
u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info;
@@ -362,7 +362,6 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
struct mt792x_phy *phy = &dev->phy;
struct ieee80211_supported_band *sband;
u32 csum_status = *(u32 *)skb->cb;
- u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
@@ -420,7 +419,7 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
if (!sband->channels)
return -EINVAL;
- if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
+ if (mt76_is_mmio(&dev->mt76) && (rxd3 & csum_mask) == csum_mask &&
!(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 8c0768bf9343..791c8b00e112 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -439,6 +439,19 @@ static void mt7925_roc_iter(void *priv, u8 *mac,
mt7925_mcu_abort_roc(phy, &mvif->bss_conf, phy->roc_token_id);
}
+void mt7925_roc_abort_sync(struct mt792x_dev *dev)
+{
+ struct mt792x_phy *phy = &dev->phy;
+
+ del_timer_sync(&phy->roc_timer);
+ cancel_work_sync(&phy->roc_work);
+ if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
+ ieee80211_iterate_interfaces(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7925_roc_iter, (void *)phy);
+}
+EXPORT_SYMBOL_GPL(mt7925_roc_abort_sync);
+
void mt7925_roc_work(struct work_struct *work)
{
struct mt792x_phy *phy;
@@ -1078,23 +1091,26 @@ static void mt7925_mac_link_sta_assoc(struct mt76_dev *mdev,
mt792x_mutex_release(dev);
}
-void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+int mt7925_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
{
+ struct ieee80211_link_sta *link_sta = &sta->deflink;
+
+ if (ev != MT76_STA_EVENT_ASSOC)
+ return 0;
+
if (ieee80211_vif_is_mld(vif)) {
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- struct ieee80211_link_sta *link_sta;
link_sta = mt792x_sta_to_link_sta(vif, sta, msta->deflink_id);
-
mt7925_mac_set_links(mdev, vif);
-
- mt7925_mac_link_sta_assoc(mdev, vif, link_sta);
- } else {
- mt7925_mac_link_sta_assoc(mdev, vif, &sta->deflink);
}
+
+ mt7925_mac_link_sta_assoc(mdev, vif, link_sta);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(mt7925_mac_sta_assoc);
+EXPORT_SYMBOL_GPL(mt7925_mac_sta_event);
static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev,
struct ieee80211_vif *vif,
@@ -1109,6 +1125,8 @@ static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev,
msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
mlink = mt792x_sta_to_link(msta, link_id);
+ mt7925_roc_abort_sync(dev);
+
mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid);
mt76_connac_pm_wake(&dev->mphy, &dev->pm);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index 9dc22fbe25d3..748ea6adbc6b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -638,6 +638,9 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) {
clc = (const struct mt7925_clc *)(clc_base + offset);
+ if (clc->idx > ARRAY_SIZE(phy->clc))
+ break;
+
/* do not init buf again if chip reset triggered */
if (phy->clc[clc->idx])
continue;
@@ -1770,16 +1773,19 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
struct sk_buff *skb;
+ int conn_state;
skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid,
MT7925_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
+ conn_state = info->enable ? CONN_STATE_PORT_SECURE :
+ CONN_STATE_DISCONNECT;
if (info->link_sta)
mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
info->link_sta,
- info->enable, info->newly);
+ conn_state, info->newly);
if (info->link_sta && info->enable) {
mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta);
mt7925_mcu_sta_ht_tlv(skb, info->link_sta);
@@ -2171,12 +2177,12 @@ void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy,
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_RLM, sizeof(*req));
req = (struct bss_rlm_tlv *)tlv;
- req->control_channel = chandef->chan->hw_value,
- req->center_chan = ieee80211_frequency_to_channel(freq1),
- req->center_chan2 = ieee80211_frequency_to_channel(freq2),
- req->tx_streams = hweight8(phy->antenna_mask),
- req->ht_op_info = 4, /* set HT 40M allowed */
- req->rx_streams = hweight8(phy->antenna_mask),
+ req->control_channel = chandef->chan->hw_value;
+ req->center_chan = ieee80211_frequency_to_channel(freq1);
+ req->center_chan2 = ieee80211_frequency_to_channel(freq2);
+ req->tx_streams = hweight8(phy->antenna_mask);
+ req->ht_op_info = 4; /* set HT 40M allowed */
+ req->rx_streams = hweight8(phy->antenna_mask);
req->band = band;
switch (chandef->width) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
index 669f3a079d04..f5c02e5f5066 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
@@ -219,8 +219,8 @@ int mt7925_mac_init(struct mt792x_dev *dev);
int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
bool mt7925_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask);
-void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+int mt7925_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7925_mac_reset_work(struct work_struct *work);
@@ -307,6 +307,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
enum mt7925_roc_req type, u8 token_id);
int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
u8 token_id);
+void mt7925_roc_abort_sync(struct mt792x_dev *dev);
int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq);
int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index 6e4f4e78c350..9aec675450f2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -279,7 +279,7 @@ static int mt7925_pci_probe(struct pci_dev *pdev,
.rx_skb = mt7925_queue_rx_skb,
.rx_poll_complete = mt792x_rx_poll_complete,
.sta_add = mt7925_mac_sta_add,
- .sta_assoc = mt7925_mac_sta_assoc,
+ .sta_event = mt7925_mac_sta_event,
.sta_remove = mt7925_mac_sta_remove,
.update_survey = mt792x_update_channel,
};
@@ -449,6 +449,8 @@ static int mt7925_pci_suspend(struct device *device)
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
+ mt7925_roc_abort_sync(dev);
+
err = mt792x_mcu_drv_pmctrl(dev);
if (err < 0)
goto restore_suspend;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
index 1e0f094fc905..682db1bab21c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
@@ -142,7 +142,7 @@ static int mt7925u_probe(struct usb_interface *usb_intf,
.rx_skb = mt7925_queue_rx_skb,
.rx_check = mt7925_rx_check,
.sta_add = mt7925_mac_sta_add,
- .sta_assoc = mt7925_mac_sta_assoc,
+ .sta_event = mt7925_mac_sta_event,
.sta_remove = mt7925_mac_sta_remove,
.update_survey = mt792x_update_channel,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index 7fa74d59cc48..ab12616ec2b8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -68,7 +68,7 @@ struct mt792x_fw_features {
enum {
MT792x_CLC_POWER,
- MT792x_CLC_CHAN,
+ MT792x_CLC_POWER_EXT,
MT792x_CLC_MAX_NUM,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index 283df84f1b43..5e96973226bb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -42,6 +42,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_160),
+ .beacon_int_min_gcd = 100,
}
};
@@ -941,8 +942,12 @@ void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy)
cap = &phy->mt76->sband_5g.sband.vht_cap.cap;
*cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
- IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
- FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, sts - 1);
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+ if (is_mt7996(phy->mt76->dev))
+ *cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 3);
+ else
+ *cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 4);
*cap &= ~(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK |
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
@@ -987,9 +992,15 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
elem->phy_cap_info[2] |= c;
- c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE;
+
+ if (is_mt7996(phy->mt76->dev))
+ c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ else
+ c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5;
+
elem->phy_cap_info[4] |= c;
/* do not support NG16 due to spec D4.0 changes subcarrier idx */
@@ -1011,8 +1022,6 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
return;
elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
- if (vif == NL80211_IFTYPE_AP)
- elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
sts - 1) |
@@ -1020,6 +1029,11 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
sts - 1);
elem->phy_cap_info[5] |= c;
+ if (vif != NL80211_IFTYPE_AP)
+ return;
+
+ elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+
c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
elem->phy_cap_info[6] |= c;
@@ -1179,12 +1193,12 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
IEEE80211_EHT_MAC_CAP0_OM_CONTROL;
eht_cap_elem->phy_cap_info[0] =
- IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ |
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE;
- val = max_t(u8, sts - 1, 3);
+ /* Set the maximum capability regardless of the antenna configuration. */
+ val = is_mt7992(phy->mt76->dev) ? 4 : 3;
eht_cap_elem->phy_cap_info[0] |=
u8_encode_bits(u8_get_bits(val, BIT(0)),
IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK);
@@ -1193,30 +1207,36 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
u8_encode_bits(u8_get_bits(val, GENMASK(2, 1)),
IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
u8_encode_bits(val,
- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK) |
- u8_encode_bits(val,
- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK);
eht_cap_elem->phy_cap_info[2] =
u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK) |
- u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK) |
- u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK);
+ u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK);
+
+ if (band == NL80211_BAND_6GHZ) {
+ eht_cap_elem->phy_cap_info[0] |=
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
+
+ eht_cap_elem->phy_cap_info[1] |=
+ u8_encode_bits(val,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[2] |=
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK);
+ }
eht_cap_elem->phy_cap_info[3] =
IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
- IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
- IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
- IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
- IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK;
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK;
eht_cap_elem->phy_cap_info[4] =
u8_encode_bits(min_t(int, sts - 1, 2),
IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
eht_cap_elem->phy_cap_info[5] =
- IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US,
IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK) |
u8_encode_bits(u8_get_bits(0x11, GENMASK(1, 0)),
@@ -1230,14 +1250,6 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK) |
u8_encode_bits(val, IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK);
- eht_cap_elem->phy_cap_info[7] =
- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ;
-
val = u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_RX) |
u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_TX);
#define SET_EHT_MAX_NSS(_bw, _val) do { \
@@ -1248,8 +1260,29 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
SET_EHT_MAX_NSS(80, val);
SET_EHT_MAX_NSS(160, val);
- SET_EHT_MAX_NSS(320, val);
+ if (band == NL80211_BAND_6GHZ)
+ SET_EHT_MAX_NSS(320, val);
#undef SET_EHT_MAX_NSS
+
+ if (iftype != NL80211_IFTYPE_AP)
+ return;
+
+ eht_cap_elem->phy_cap_info[3] |=
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK;
+
+ eht_cap_elem->phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ;
+
+ if (band != NL80211_BAND_6GHZ)
+ return;
+
+ eht_cap_elem->phy_cap_info[7] |=
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ;
}
static void
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index bc7111a71f98..0d21414e2c88 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -435,7 +435,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
u32 rxd4 = le32_to_cpu(rxd[4]);
- u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
+ u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
u32 csum_status = *(u32 *)skb->cb;
u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
@@ -497,7 +497,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
if (!sband->channels)
return -EINVAL;
- if ((rxd0 & csum_mask) == csum_mask &&
+ if ((rxd3 & csum_mask) == csum_mask &&
!(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -746,7 +746,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool multicast = is_multicast_ether_addr(hdr->addr1);
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- __le16 fc = hdr->frame_control;
+ __le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
u8 fc_type, fc_stype;
u32 val;
@@ -780,6 +780,15 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
+ if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
+ else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
+ else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
+ else
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE);
+
txwi[2] |= cpu_to_le32(val);
txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
@@ -789,7 +798,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
}
if (info->flags & IEEE80211_TX_CTL_INJECTED) {
- u16 seqno = le16_to_cpu(hdr->seq_ctrl);
+ u16 seqno = le16_to_cpu(sc);
if (ieee80211_is_back_req(hdr->frame_control)) {
struct ieee80211_bar *bar;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index bce082038219..39f071ece35e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -206,7 +206,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
mvif->mt76.omac_idx = idx;
mvif->phy = phy;
mvif->mt76.band_idx = band_idx;
- mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
+ mvif->mt76.wmm_idx = vif->type == NL80211_IFTYPE_AP ? 0 : 3;
ret = mt7996_mcu_add_dev_info(phy, vif, true);
if (ret)
@@ -291,22 +291,19 @@ static void mt7996_remove_interface(struct ieee80211_hw *hw,
mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
}
-int mt7996_set_channel(struct mt7996_phy *phy)
+int mt7996_set_channel(struct mt76_phy *mphy)
{
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_phy *phy = mphy->priv;
int ret;
- cancel_delayed_work_sync(&phy->mt76->mac_work);
-
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &phy->mt76->state);
-
- mt76_set_channel(phy->mt76);
-
ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_SWITCH);
if (ret)
goto out;
+ ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_RX_PATH);
+ if (ret)
+ goto out;
+
ret = mt7996_dfs_init_radar_detector(phy);
mt7996_mac_cca_stats_reset(phy);
@@ -314,13 +311,7 @@ int mt7996_set_channel(struct mt7996_phy *phy)
phy->noise = 0;
out:
- clear_bit(MT76_RESET, &phy->mt76->state);
- mutex_unlock(&dev->mt76.mutex);
-
- mt76_txq_schedule_all(phy->mt76);
-
- ieee80211_queue_delayed_work(phy->mt76->hw,
- &phy->mt76->mac_work,
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT7996_WATCHDOG_TIME);
return ret;
@@ -360,14 +351,14 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_SMS4:
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
- wcid_keyidx = &wcid->hw_key_idx2;
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
- fallthrough;
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- if (key->keyidx == 6 || key->keyidx == 7)
+ if (key->keyidx == 6 || key->keyidx == 7) {
+ wcid_keyidx = &wcid->hw_key_idx2;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
break;
+ }
fallthrough;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
@@ -411,11 +402,9 @@ static int mt7996_config(struct ieee80211_hw *hw, u32 changed)
int ret;
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- ret = mt7996_set_channel(phy);
+ ret = mt76_update_channel(phy->mt76);
if (ret)
return ret;
- ieee80211_wake_queues(hw);
}
if (changed & (IEEE80211_CONF_CHANGE_POWER |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 2e4fa9f48dfb..6c445a9dbc03 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -371,7 +371,7 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb)
&dev->rdd2_chandef,
GFP_ATOMIC);
else
- ieee80211_radar_detected(mphy->hw);
+ ieee80211_radar_detected(mphy->hw, NULL);
dev->hw_pattern++;
}
@@ -735,7 +735,7 @@ void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb)
static struct tlv *
mt7996_mcu_add_uni_tlv(struct sk_buff *skb, u16 tag, u16 len)
{
- struct tlv *ptlv = skb_put(skb, len);
+ struct tlv *ptlv = skb_put_zero(skb, len);
ptlv->tag = cpu_to_le16(tag);
ptlv->len = cpu_to_le16(len);
@@ -822,7 +822,7 @@ mt7996_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct bss_info_uni_mbssid *mbssid;
struct tlv *tlv;
- if (!vif->bss_conf.bssid_indicator)
+ if (!vif->bss_conf.bssid_indicator && enable)
return;
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_11V_MBSSID, sizeof(*mbssid));
@@ -1429,10 +1429,10 @@ mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif,
if (bfee)
return vif->bss_conf.eht_su_beamformee &&
- EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
+ EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
else
return vif->bss_conf.eht_su_beamformer &&
- EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
+ EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
}
if (sta->deflink.he_cap.has_he) {
@@ -1544,6 +1544,9 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
u8 nss_mcs = mt7996_mcu_get_sta_nss(mcs_map);
u8 snd_dim, sts;
+ if (!vc)
+ return;
+
bf->tx_mode = MT_PHY_TYPE_HE_SU;
mt7996_mcu_sta_sounding_rate(bf);
@@ -1653,7 +1656,7 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_phy *phy = mvif->phy;
- int tx_ant = hweight8(phy->mt76->chainmask) - 1;
+ int tx_ant = hweight16(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
static const u8 matrix[4][4] = {
@@ -2160,6 +2163,7 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta;
struct mt7996_sta *msta;
struct sk_buff *skb;
+ int conn_state;
int ret;
msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta;
@@ -2172,8 +2176,9 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
/* starec basic */
+ conn_state = enable ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT;
mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta,
- enable, newly);
+ conn_state, newly);
if (!enable)
goto out;
@@ -3460,7 +3465,7 @@ int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag)
if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL ||
+ else if (phy->mt76->offchannel ||
phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
@@ -3923,8 +3928,9 @@ int mt7996_mcu_set_txbf(struct mt7996_dev *dev, u8 action)
tlv = mt7996_mcu_add_uni_tlv(skb, action, sizeof(*req_mod_en));
req_mod_en = (struct bf_mod_en_ctrl *)tlv;
- req_mod_en->bf_num = 3;
- req_mod_en->bf_bitmap = GENMASK(2, 0);
+ req_mod_en->bf_num = mt7996_band_valid(dev, MT_BAND2) ? 3 : 2;
+ req_mod_en->bf_bitmap = mt7996_band_valid(dev, MT_BAND2) ?
+ GENMASK(2, 0) : GENMASK(1, 0);
break;
}
default:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 928a9663b49e..40e45fb2b626 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -620,6 +620,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
.sta_add = mt7996_mac_sta_add,
.sta_remove = mt7996_mac_sta_remove,
.update_survey = mt7996_update_channel,
+ .set_channel = mt7996_set_channel,
};
struct mt7996_dev *dev;
struct mt76_dev *mdev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index 177cfff31120..ab8c9070630b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -468,7 +468,7 @@ int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_he_obss_pd *he_obss_pd);
int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool changed);
-int mt7996_set_channel(struct mt7996_phy *phy);
+int mt7996_set_channel(struct mt76_phy *mphy);
int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag);
int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif);
int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 5cf6edee4d13..ce193e625666 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -313,6 +313,9 @@ __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
return idx;
wcid = (struct mt76_wcid *)sta->drv_priv;
+ if (!wcid->sta)
+ return idx;
+
q->entry[idx].wcid = wcid->idx;
if (!non_aql)
@@ -330,6 +333,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct sk_buff_head *head;
if (mt76_testmode_enabled(phy)) {
ieee80211_free_txskb(phy->hw, skb);
@@ -345,9 +349,15 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
- spin_lock_bh(&wcid->tx_pending.lock);
- __skb_queue_tail(&wcid->tx_pending, skb);
- spin_unlock_bh(&wcid->tx_pending.lock);
+ if ((info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
+ (info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK))
+ head = &wcid->tx_offchannel;
+ else
+ head = &wcid->tx_pending;
+
+ spin_lock_bh(&head->lock);
+ __skb_queue_tail(head, skb);
+ spin_unlock_bh(&head->lock);
spin_lock_bh(&phy->tx_lock);
if (list_empty(&wcid->tx_list))
@@ -478,7 +488,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
return idx;
do {
- if (test_bit(MT76_RESET, &phy->state))
+ if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
return -EBUSY;
if (stop || mt76_txq_stopped(q))
@@ -522,7 +532,7 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
while (1) {
int n_frames = 0;
- if (test_bit(MT76_RESET, &phy->state))
+ if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
return -EBUSY;
if (dev->queue_ops->tx_cleanup &&
@@ -568,7 +578,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
int len;
- if (qid >= 4)
+ if (qid >= 4 || phy->offchannel)
return;
local_bh_disable();
@@ -586,7 +596,8 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
static int
-mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
+mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
+ struct sk_buff_head *head)
{
struct mt76_dev *dev = phy->dev;
struct ieee80211_sta *sta;
@@ -594,8 +605,8 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
struct sk_buff *skb;
int ret = 0;
- spin_lock(&wcid->tx_pending.lock);
- while ((skb = skb_peek(&wcid->tx_pending)) != NULL) {
+ spin_lock(&head->lock);
+ while ((skb = skb_peek(head)) != NULL) {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int qid = skb_get_queue_mapping(skb);
@@ -607,13 +618,13 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
qid = MT_TXQ_PSD;
q = phy->q_tx[qid];
- if (mt76_txq_stopped(q)) {
+ if (mt76_txq_stopped(q) || test_bit(MT76_RESET, &phy->state)) {
ret = -1;
break;
}
- __skb_unlink(skb, &wcid->tx_pending);
- spin_unlock(&wcid->tx_pending.lock);
+ __skb_unlink(skb, head);
+ spin_unlock(&head->lock);
sta = wcid_to_sta(wcid);
spin_lock(&q->lock);
@@ -621,15 +632,17 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
dev->queue_ops->kick(dev, q);
spin_unlock(&q->lock);
- spin_lock(&wcid->tx_pending.lock);
+ spin_lock(&head->lock);
}
- spin_unlock(&wcid->tx_pending.lock);
+ spin_unlock(&head->lock);
return ret;
}
static void mt76_txq_schedule_pending(struct mt76_phy *phy)
{
+ LIST_HEAD(tx_list);
+
if (list_empty(&phy->tx_list))
return;
@@ -637,22 +650,27 @@ static void mt76_txq_schedule_pending(struct mt76_phy *phy)
rcu_read_lock();
spin_lock(&phy->tx_lock);
- while (!list_empty(&phy->tx_list)) {
- struct mt76_wcid *wcid = NULL;
+ list_splice_init(&phy->tx_list, &tx_list);
+ while (!list_empty(&tx_list)) {
+ struct mt76_wcid *wcid;
int ret;
- wcid = list_first_entry(&phy->tx_list, struct mt76_wcid, tx_list);
+ wcid = list_first_entry(&tx_list, struct mt76_wcid, tx_list);
list_del_init(&wcid->tx_list);
spin_unlock(&phy->tx_lock);
- ret = mt76_txq_schedule_pending_wcid(phy, wcid);
+ ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
+ if (ret >= 0 && !phy->offchannel)
+ ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_pending);
spin_lock(&phy->tx_lock);
- if (ret) {
- if (list_empty(&wcid->tx_list))
- list_add_tail(&wcid->tx_list, &phy->tx_list);
+ if (!skb_queue_empty(&wcid->tx_pending) &&
+ !skb_queue_empty(&wcid->tx_offchannel) &&
+ list_empty(&wcid->tx_list))
+ list_add_tail(&wcid->tx_list, &phy->tx_list);
+
+ if (ret < 0)
break;
- }
}
spin_unlock(&phy->tx_lock);
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.h b/drivers/net/wireless/mediatek/mt7601u/dma.h
index 81e559ec1c7b..cda9c267516e 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.h
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.h
@@ -7,7 +7,7 @@
#ifndef __MT7601U_DMA_H
#define __MT7601U_DMA_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/skbuff.h>
#define MT_DMA_HDR_LEN 4
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
index 625bebe60538..d4d31a546556 100644
--- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
@@ -8,7 +8,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "mt7601u.h"
#include "eeprom.h"
#include "mac.h"
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index 3c48e1a57b24..bba53307b960 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -384,6 +384,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct wilc_join_bss_param *param;
u8 rates_len = 0;
int ies_len;
+ u64 ies_tsf;
int ret;
param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -399,6 +400,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
return NULL;
}
ies_len = ies->len;
+ ies_tsf = ies->tsf;
rcu_read_unlock();
param->beacon_period = cpu_to_le16(bss->beacon_interval);
@@ -454,7 +456,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
(u8 *)&noa_attr, sizeof(noa_attr));
if (ret > 0) {
- param->tsf_lo = cpu_to_le32(ies->tsf);
+ param->tsf_lo = cpu_to_le32(ies_tsf);
param->noa_enabled = 1;
param->idx = noa_attr.index;
if (noa_attr.oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT) {
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 0043f7a0fdf9..b4da05d5a498 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -174,19 +174,18 @@ static int wilc_sdio_probe(struct sdio_func *func,
wilc->bus_data = sdio_priv;
wilc->dev = &func->dev;
- wilc->rtc_clk = devm_clk_get_optional(&func->card->dev, "rtc");
+ wilc->rtc_clk = devm_clk_get_optional_enabled(&func->card->dev, "rtc");
if (IS_ERR(wilc->rtc_clk)) {
ret = PTR_ERR(wilc->rtc_clk);
goto dispose_irq;
}
- clk_prepare_enable(wilc->rtc_clk);
wilc_sdio_init(wilc, false);
ret = wilc_load_mac_from_nv(wilc);
if (ret) {
pr_err("Can not retrieve MAC address from chip\n");
- goto clk_disable_unprepare;
+ goto dispose_irq;
}
wilc_sdio_deinit(wilc);
@@ -195,14 +194,12 @@ static int wilc_sdio_probe(struct sdio_func *func,
NL80211_IFTYPE_STATION, false);
if (IS_ERR(vif)) {
ret = PTR_ERR(vif);
- goto clk_disable_unprepare;
+ goto dispose_irq;
}
dev_info(&func->dev, "Driver Initializing success\n");
return 0;
-clk_disable_unprepare:
- clk_disable_unprepare(wilc->rtc_clk);
dispose_irq:
irq_dispose_mapping(wilc->dev_irq_num);
wilc_netdev_cleanup(wilc);
@@ -217,7 +214,6 @@ static void wilc_sdio_remove(struct sdio_func *func)
struct wilc *wilc = sdio_get_drvdata(func);
struct wilc_sdio *sdio_priv = wilc->bus_data;
- clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
@@ -977,6 +973,9 @@ static int wilc_sdio_suspend(struct device *dev)
dev_info(dev, "sdio suspend\n");
+ if (!wilc->initialized)
+ return 0;
+
if (!IS_ERR(wilc->rtc_clk))
clk_disable_unprepare(wilc->rtc_clk);
@@ -999,6 +998,13 @@ static int wilc_sdio_resume(struct device *dev)
struct wilc *wilc = sdio_get_drvdata(func);
dev_info(dev, "sdio resume\n");
+
+ if (!wilc->initialized)
+ return 0;
+
+ if (!IS_ERR(wilc->rtc_clk))
+ clk_prepare_enable(wilc->rtc_clk);
+
wilc_sdio_init(wilc, true);
wilc_sdio_enable_interrupt(wilc);
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 5ff940c53ad9..05b577b1068e 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -228,12 +228,11 @@ static int wilc_bus_probe(struct spi_device *spi)
if (ret < 0)
goto netdev_cleanup;
- wilc->rtc_clk = devm_clk_get_optional(&spi->dev, "rtc");
+ wilc->rtc_clk = devm_clk_get_optional_enabled(&spi->dev, "rtc");
if (IS_ERR(wilc->rtc_clk)) {
ret = PTR_ERR(wilc->rtc_clk);
goto netdev_cleanup;
}
- clk_prepare_enable(wilc->rtc_clk);
dev_info(&spi->dev, "Selected CRC config: crc7=%s, crc16=%s\n",
enable_crc7 ? "on" : "off", enable_crc16 ? "on" : "off");
@@ -266,7 +265,6 @@ static int wilc_bus_probe(struct spi_device *spi)
return 0;
power_down:
- clk_disable_unprepare(wilc->rtc_clk);
wilc_wlan_power(wilc, false);
netdev_cleanup:
wilc_netdev_cleanup(wilc);
@@ -280,7 +278,6 @@ static void wilc_bus_remove(struct spi_device *spi)
struct wilc *wilc = spi_get_drvdata(spi);
struct wilc_spi *spi_priv = wilc->bus_data;
- clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
kfree(spi_priv);
}
diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
index 15334940287d..56d1139ba8bc 100644
--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
+++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
@@ -16,7 +16,7 @@
#include <linux/string.h>
#include <linux/module.h>
#include <net/mac80211.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/sysfs.h>
#include "mac.h"
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 663d77770fce..8b97accf6638 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -837,7 +837,7 @@ static int qtnf_channel_switch(struct wiphy *wiphy, struct net_device *dev,
static int qtnf_start_radar_detection(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms)
+ u32 cac_time_ms, int link_id)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
int ret;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 76b07db284f8..71840f41b73c 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -520,21 +520,21 @@ static int qtnf_event_handle_radar(struct qtnf_vif *vif,
cfg80211_radar_event(wiphy, &chandef, GFP_KERNEL);
break;
case QLINK_RADAR_CAC_FINISHED:
- if (!vif->wdev.cac_started)
+ if (!vif->wdev.links[0].cac_started)
break;
cfg80211_cac_event(vif->netdev, &chandef,
- NL80211_RADAR_CAC_FINISHED, GFP_KERNEL);
+ NL80211_RADAR_CAC_FINISHED, GFP_KERNEL, 0);
break;
case QLINK_RADAR_CAC_ABORTED:
- if (!vif->wdev.cac_started)
+ if (!vif->wdev.links[0].cac_started)
break;
cfg80211_cac_event(vif->netdev, &chandef,
- NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
+ NL80211_RADAR_CAC_ABORTED, GFP_KERNEL, 0);
break;
case QLINK_RADAR_CAC_STARTED:
- if (vif->wdev.cac_started)
+ if (vif->wdev.links[0].cac_started)
break;
if (!wiphy_ext_feature_isset(wiphy,
@@ -542,7 +542,7 @@ static int qtnf_event_handle_radar(struct qtnf_vif *vif,
break;
cfg80211_cac_event(vif->netdev, &chandef,
- NL80211_RADAR_CAC_STARTED, GFP_KERNEL);
+ NL80211_RADAR_CAC_STARTED, GFP_KERNEL, 0);
break;
default:
pr_warn("%s: unhandled radar event %u\n",
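
These qtnfmac changes track the cfg80211 MLO rework: CAC state now lives per link in wdev->links[], and cfg80211_cac_event() takes a link_id argument. A non-MLO driver always operates on link 0, as in this condensed sketch (the function name is illustrative):

#include <net/cfg80211.h>

/* Illustrative non-MLO driver: all CAC state sits on link 0. */
static void foo_report_cac_finished(struct net_device *netdev,
                                    struct wireless_dev *wdev,
                                    struct cfg80211_chan_def *chandef)
{
        unsigned int link_id = 0;

        /* CAC state moved from the wdev into the per-link data */
        if (!wdev->links[link_id].cac_started)
                return;

        cfg80211_cac_event(netdev, chandef, NL80211_RADAR_CAC_FINISHED,
                           GFP_KERNEL, link_id);
}
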
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
index 44ad94757a03..14d0343368ac 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
@@ -20,9 +20,8 @@ config RTL8XXXU
memory footprint than the vendor drivers and benefits
from the in kernel mac80211 stack.
- It can coexist with drivers from drivers/staging/rtl8723au,
- drivers/staging/rtl8192u, and drivers/net/wireless/rtlwifi,
- but you will need to control which module you wish to load.
+ It can coexist with the rtlwifi driver but you will need
+ to control which module you wish to load.
To compile this driver as a module, choose M here: the module will
be called rtl8xxxu. If unsure, say N.
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index 043fa364e701..7891c988dd5f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -8110,6 +8110,12 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817f, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x819a, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8754, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817c, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
/* Tested by Larry Finger */
{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7811, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c
index 700c6e2bcad1..ff458fb8514d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c
@@ -181,11 +181,11 @@ static void _rtl92du_init_queue_reserved_page(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u32 txqpagenum, txqpageunit;
u32 txqremainingpage;
+ u32 value32 = 0;
u32 numhq = 0;
u32 numlq = 0;
u32 numnq = 0;
u32 numpubq;
- u32 value32;
if (rtlhal->macphymode != SINGLEMAC_SINGLEPHY) {
numpubq = NORMAL_PAGE_NUM_PUBQ_92D_DUAL_MAC;
diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig
index 22838ede03cd..02b0d698413b 100644
--- a/drivers/net/wireless/realtek/rtw88/Kconfig
+++ b/drivers/net/wireless/realtek/rtw88/Kconfig
@@ -12,6 +12,7 @@ if RTW88
config RTW88_CORE
tristate
+ select WANT_DEV_COREDUMP
config RTW88_PCI
tristate
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index de3332eb7a22..a99776af56c2 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -2194,7 +2194,6 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
u8 table_case, tdma_case;
- bool wl_cpt_test = false, bt_cpt_test = false;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2202,29 +2201,16 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
if (efuse->share_ant) {
/* Shared-Ant */
- if (wl_cpt_test) {
- if (coex_stat->wl_gl_busy) {
- table_case = 20;
- tdma_case = 17;
- } else {
- table_case = 10;
- tdma_case = 15;
- }
- } else if (bt_cpt_test) {
- table_case = 26;
- tdma_case = 26;
- } else {
- if (coex_stat->wl_gl_busy &&
- coex_stat->wl_noisy_level == 0)
- table_case = 14;
- else
- table_case = 10;
+ if (coex_stat->wl_gl_busy &&
+ coex_stat->wl_noisy_level == 0)
+ table_case = 14;
+ else
+ table_case = 10;
- if (coex_stat->wl_gl_busy)
- tdma_case = 15;
- else
- tdma_case = 20;
- }
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 15;
+ else
+ tdma_case = 20;
} else {
/* Non-Shared-Ant */
table_case = 112;
@@ -2235,11 +2221,7 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
tdma_case = 120;
}
- if (wl_cpt_test)
- rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[1]);
- else
- rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
-
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
rtw_coex_table(rtwdev, false, table_case);
rtw_coex_tdma(rtwdev, false, tdma_case);
}
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 5b2036798159..c26a6905fd15 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -43,6 +43,62 @@ struct rtw_debugfs_priv {
};
};
+struct rtw_debugfs {
+ struct rtw_debugfs_priv mac_0;
+ struct rtw_debugfs_priv mac_1;
+ struct rtw_debugfs_priv mac_2;
+ struct rtw_debugfs_priv mac_3;
+ struct rtw_debugfs_priv mac_4;
+ struct rtw_debugfs_priv mac_5;
+ struct rtw_debugfs_priv mac_6;
+ struct rtw_debugfs_priv mac_7;
+ struct rtw_debugfs_priv mac_10;
+ struct rtw_debugfs_priv mac_11;
+ struct rtw_debugfs_priv mac_12;
+ struct rtw_debugfs_priv mac_13;
+ struct rtw_debugfs_priv mac_14;
+ struct rtw_debugfs_priv mac_15;
+ struct rtw_debugfs_priv mac_16;
+ struct rtw_debugfs_priv mac_17;
+ struct rtw_debugfs_priv bb_8;
+ struct rtw_debugfs_priv bb_9;
+ struct rtw_debugfs_priv bb_a;
+ struct rtw_debugfs_priv bb_b;
+ struct rtw_debugfs_priv bb_c;
+ struct rtw_debugfs_priv bb_d;
+ struct rtw_debugfs_priv bb_e;
+ struct rtw_debugfs_priv bb_f;
+ struct rtw_debugfs_priv bb_18;
+ struct rtw_debugfs_priv bb_19;
+ struct rtw_debugfs_priv bb_1a;
+ struct rtw_debugfs_priv bb_1b;
+ struct rtw_debugfs_priv bb_1c;
+ struct rtw_debugfs_priv bb_1d;
+ struct rtw_debugfs_priv bb_1e;
+ struct rtw_debugfs_priv bb_1f;
+ struct rtw_debugfs_priv bb_2c;
+ struct rtw_debugfs_priv bb_2d;
+ struct rtw_debugfs_priv bb_40;
+ struct rtw_debugfs_priv bb_41;
+ struct rtw_debugfs_priv rf_dump;
+ struct rtw_debugfs_priv tx_pwr_tbl;
+ struct rtw_debugfs_priv write_reg;
+ struct rtw_debugfs_priv h2c;
+ struct rtw_debugfs_priv rf_write;
+ struct rtw_debugfs_priv rf_read;
+ struct rtw_debugfs_priv read_reg;
+ struct rtw_debugfs_priv fix_rate;
+ struct rtw_debugfs_priv dump_cam;
+ struct rtw_debugfs_priv rsvd_page;
+ struct rtw_debugfs_priv phy_info;
+ struct rtw_debugfs_priv coex_enable;
+ struct rtw_debugfs_priv coex_info;
+ struct rtw_debugfs_priv edcca_enable;
+ struct rtw_debugfs_priv fw_crash;
+ struct rtw_debugfs_priv force_lowest_basic_rate;
+ struct rtw_debugfs_priv dm_cap;
+};
+
static const char * const rtw_dm_cap_strs[] = {
[RTW_DM_CAP_NA] = "NA",
[RTW_DM_CAP_TXGAPK] = "TXGAPK",
@@ -524,7 +580,7 @@ static int rtw_debug_get_bb_page(struct seq_file *m, void *v)
return 0;
}
-static int rtw_debug_get_rf_dump(struct seq_file *m, void *v)
+static int rtw_debugfs_get_rf_dump(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
@@ -1074,139 +1130,102 @@ static int rtw_debugfs_get_dm_cap(struct seq_file *m, void *v)
return 0;
}
-#define rtw_debug_impl_mac(page, addr) \
-static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \
+#define rtw_debug_priv_mac(addr) \
+{ \
.cb_read = rtw_debug_get_mac_page, \
.cb_data = addr, \
}
-rtw_debug_impl_mac(0, 0x0000);
-rtw_debug_impl_mac(1, 0x0100);
-rtw_debug_impl_mac(2, 0x0200);
-rtw_debug_impl_mac(3, 0x0300);
-rtw_debug_impl_mac(4, 0x0400);
-rtw_debug_impl_mac(5, 0x0500);
-rtw_debug_impl_mac(6, 0x0600);
-rtw_debug_impl_mac(7, 0x0700);
-rtw_debug_impl_mac(10, 0x1000);
-rtw_debug_impl_mac(11, 0x1100);
-rtw_debug_impl_mac(12, 0x1200);
-rtw_debug_impl_mac(13, 0x1300);
-rtw_debug_impl_mac(14, 0x1400);
-rtw_debug_impl_mac(15, 0x1500);
-rtw_debug_impl_mac(16, 0x1600);
-rtw_debug_impl_mac(17, 0x1700);
-
-#define rtw_debug_impl_bb(page, addr) \
-static struct rtw_debugfs_priv rtw_debug_priv_bb_ ##page = { \
+#define rtw_debug_priv_bb(addr) \
+{ \
.cb_read = rtw_debug_get_bb_page, \
.cb_data = addr, \
}
-rtw_debug_impl_bb(8, 0x0800);
-rtw_debug_impl_bb(9, 0x0900);
-rtw_debug_impl_bb(a, 0x0a00);
-rtw_debug_impl_bb(b, 0x0b00);
-rtw_debug_impl_bb(c, 0x0c00);
-rtw_debug_impl_bb(d, 0x0d00);
-rtw_debug_impl_bb(e, 0x0e00);
-rtw_debug_impl_bb(f, 0x0f00);
-rtw_debug_impl_bb(18, 0x1800);
-rtw_debug_impl_bb(19, 0x1900);
-rtw_debug_impl_bb(1a, 0x1a00);
-rtw_debug_impl_bb(1b, 0x1b00);
-rtw_debug_impl_bb(1c, 0x1c00);
-rtw_debug_impl_bb(1d, 0x1d00);
-rtw_debug_impl_bb(1e, 0x1e00);
-rtw_debug_impl_bb(1f, 0x1f00);
-rtw_debug_impl_bb(2c, 0x2c00);
-rtw_debug_impl_bb(2d, 0x2d00);
-rtw_debug_impl_bb(40, 0x4000);
-rtw_debug_impl_bb(41, 0x4100);
-
-static struct rtw_debugfs_priv rtw_debug_priv_rf_dump = {
- .cb_read = rtw_debug_get_rf_dump,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_tx_pwr_tbl = {
- .cb_read = rtw_debugfs_get_tx_pwr_tbl,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_write_reg = {
- .cb_write = rtw_debugfs_set_write_reg,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_h2c = {
- .cb_write = rtw_debugfs_set_h2c,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_rf_write = {
- .cb_write = rtw_debugfs_set_rf_write,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_rf_read = {
- .cb_write = rtw_debugfs_set_rf_read,
- .cb_read = rtw_debugfs_get_rf_read,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_read_reg = {
- .cb_write = rtw_debugfs_set_read_reg,
- .cb_read = rtw_debugfs_get_read_reg,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_fix_rate = {
- .cb_write = rtw_debugfs_set_fix_rate,
- .cb_read = rtw_debugfs_get_fix_rate,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_dump_cam = {
- .cb_write = rtw_debugfs_set_single_input,
- .cb_read = rtw_debugfs_get_dump_cam,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_rsvd_page = {
- .cb_write = rtw_debugfs_set_rsvd_page,
- .cb_read = rtw_debugfs_get_rsvd_page,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_phy_info = {
- .cb_read = rtw_debugfs_get_phy_info,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_coex_enable = {
- .cb_write = rtw_debugfs_set_coex_enable,
- .cb_read = rtw_debugfs_get_coex_enable,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_coex_info = {
- .cb_read = rtw_debugfs_get_coex_info,
-};
+#define rtw_debug_priv_get(name) \
+{ \
+ .cb_read = rtw_debugfs_get_ ##name, \
+}
-static struct rtw_debugfs_priv rtw_debug_priv_edcca_enable = {
- .cb_write = rtw_debugfs_set_edcca_enable,
- .cb_read = rtw_debugfs_get_edcca_enable,
-};
+#define rtw_debug_priv_set(name) \
+{ \
+ .cb_write = rtw_debugfs_set_ ##name, \
+}
-static struct rtw_debugfs_priv rtw_debug_priv_fw_crash = {
- .cb_write = rtw_debugfs_set_fw_crash,
- .cb_read = rtw_debugfs_get_fw_crash,
-};
+#define rtw_debug_priv_set_and_get(name) \
+{ \
+ .cb_write = rtw_debugfs_set_ ##name, \
+ .cb_read = rtw_debugfs_get_ ##name, \
+}
-static struct rtw_debugfs_priv rtw_debug_priv_force_lowest_basic_rate = {
- .cb_write = rtw_debugfs_set_force_lowest_basic_rate,
- .cb_read = rtw_debugfs_get_force_lowest_basic_rate,
-};
+#define rtw_debug_priv_set_single_and_get(name) \
+{ \
+ .cb_write = rtw_debugfs_set_single_input, \
+ .cb_read = rtw_debugfs_get_ ##name, \
+}
-static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = {
- .cb_write = rtw_debugfs_set_dm_cap,
- .cb_read = rtw_debugfs_get_dm_cap,
+static const struct rtw_debugfs rtw_debugfs_templ = {
+ .mac_0 = rtw_debug_priv_mac(0x0000),
+ .mac_1 = rtw_debug_priv_mac(0x0100),
+ .mac_2 = rtw_debug_priv_mac(0x0200),
+ .mac_3 = rtw_debug_priv_mac(0x0300),
+ .mac_4 = rtw_debug_priv_mac(0x0400),
+ .mac_5 = rtw_debug_priv_mac(0x0500),
+ .mac_6 = rtw_debug_priv_mac(0x0600),
+ .mac_7 = rtw_debug_priv_mac(0x0700),
+ .mac_10 = rtw_debug_priv_mac(0x1000),
+ .mac_11 = rtw_debug_priv_mac(0x1100),
+ .mac_12 = rtw_debug_priv_mac(0x1200),
+ .mac_13 = rtw_debug_priv_mac(0x1300),
+ .mac_14 = rtw_debug_priv_mac(0x1400),
+ .mac_15 = rtw_debug_priv_mac(0x1500),
+ .mac_16 = rtw_debug_priv_mac(0x1600),
+ .mac_17 = rtw_debug_priv_mac(0x1700),
+ .bb_8 = rtw_debug_priv_bb(0x0800),
+ .bb_9 = rtw_debug_priv_bb(0x0900),
+ .bb_a = rtw_debug_priv_bb(0x0a00),
+ .bb_b = rtw_debug_priv_bb(0x0b00),
+ .bb_c = rtw_debug_priv_bb(0x0c00),
+ .bb_d = rtw_debug_priv_bb(0x0d00),
+ .bb_e = rtw_debug_priv_bb(0x0e00),
+ .bb_f = rtw_debug_priv_bb(0x0f00),
+ .bb_18 = rtw_debug_priv_bb(0x1800),
+ .bb_19 = rtw_debug_priv_bb(0x1900),
+ .bb_1a = rtw_debug_priv_bb(0x1a00),
+ .bb_1b = rtw_debug_priv_bb(0x1b00),
+ .bb_1c = rtw_debug_priv_bb(0x1c00),
+ .bb_1d = rtw_debug_priv_bb(0x1d00),
+ .bb_1e = rtw_debug_priv_bb(0x1e00),
+ .bb_1f = rtw_debug_priv_bb(0x1f00),
+ .bb_2c = rtw_debug_priv_bb(0x2c00),
+ .bb_2d = rtw_debug_priv_bb(0x2d00),
+ .bb_40 = rtw_debug_priv_bb(0x4000),
+ .bb_41 = rtw_debug_priv_bb(0x4100),
+ .rf_dump = rtw_debug_priv_get(rf_dump),
+ .tx_pwr_tbl = rtw_debug_priv_get(tx_pwr_tbl),
+ .write_reg = rtw_debug_priv_set(write_reg),
+ .h2c = rtw_debug_priv_set(h2c),
+ .rf_write = rtw_debug_priv_set(rf_write),
+ .rf_read = rtw_debug_priv_set_and_get(rf_read),
+ .read_reg = rtw_debug_priv_set_and_get(read_reg),
+ .fix_rate = rtw_debug_priv_set_and_get(fix_rate),
+ .dump_cam = rtw_debug_priv_set_single_and_get(dump_cam),
+ .rsvd_page = rtw_debug_priv_set_and_get(rsvd_page),
+ .phy_info = rtw_debug_priv_get(phy_info),
+ .coex_enable = rtw_debug_priv_set_and_get(coex_enable),
+ .coex_info = rtw_debug_priv_get(coex_info),
+ .edcca_enable = rtw_debug_priv_set_and_get(edcca_enable),
+ .fw_crash = rtw_debug_priv_set_and_get(fw_crash),
+ .force_lowest_basic_rate = rtw_debug_priv_set_and_get(force_lowest_basic_rate),
+ .dm_cap = rtw_debug_priv_set_and_get(dm_cap),
};
#define rtw_debugfs_add_core(name, mode, fopname, parent) \
do { \
- rtw_debug_priv_ ##name.rtwdev = rtwdev; \
+ struct rtw_debugfs_priv *priv = &rtwdev->debugfs->name; \
+ priv->rtwdev = rtwdev; \
if (IS_ERR(debugfs_create_file(#name, mode, \
- parent, &rtw_debug_priv_ ##name,\
+ parent, priv, \
&file_ops_ ##fopname))) \
pr_debug("Unable to initialize debugfs:%s\n", \
#name); \
@@ -1219,12 +1238,9 @@ static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = {
#define rtw_debugfs_add_r(name) \
rtw_debugfs_add_core(name, S_IFREG | 0444, single_r, debugfs_topdir)
-void rtw_debugfs_init(struct rtw_dev *rtwdev)
+static
+void rtw_debugfs_add_basic(struct rtw_dev *rtwdev, struct dentry *debugfs_topdir)
{
- struct dentry *debugfs_topdir;
-
- debugfs_topdir = debugfs_create_dir("rtw88",
- rtwdev->hw->wiphy->debugfsdir);
rtw_debugfs_add_w(write_reg);
rtw_debugfs_add_rw(read_reg);
rtw_debugfs_add_w(rf_write);
@@ -1236,6 +1252,17 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
rtw_debugfs_add_r(coex_info);
rtw_debugfs_add_rw(coex_enable);
rtw_debugfs_add_w(h2c);
+ rtw_debugfs_add_r(rf_dump);
+ rtw_debugfs_add_r(tx_pwr_tbl);
+ rtw_debugfs_add_rw(edcca_enable);
+ rtw_debugfs_add_rw(fw_crash);
+ rtw_debugfs_add_rw(force_lowest_basic_rate);
+ rtw_debugfs_add_rw(dm_cap);
+}
+
+static
+void rtw_debugfs_add_sec0(struct rtw_dev *rtwdev, struct dentry *debugfs_topdir)
+{
rtw_debugfs_add_r(mac_0);
rtw_debugfs_add_r(mac_1);
rtw_debugfs_add_r(mac_2);
@@ -1252,6 +1279,11 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
rtw_debugfs_add_r(bb_d);
rtw_debugfs_add_r(bb_e);
rtw_debugfs_add_r(bb_f);
+}
+
+static
+void rtw_debugfs_add_sec1(struct rtw_dev *rtwdev, struct dentry *debugfs_topdir)
+{
rtw_debugfs_add_r(mac_10);
rtw_debugfs_add_r(mac_11);
rtw_debugfs_add_r(mac_12);
@@ -1274,14 +1306,29 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
rtw_debugfs_add_r(bb_40);
rtw_debugfs_add_r(bb_41);
}
- rtw_debugfs_add_r(rf_dump);
- rtw_debugfs_add_r(tx_pwr_tbl);
- rtw_debugfs_add_rw(edcca_enable);
- rtw_debugfs_add_rw(fw_crash);
- rtw_debugfs_add_rw(force_lowest_basic_rate);
- rtw_debugfs_add_rw(dm_cap);
}
+void rtw_debugfs_init(struct rtw_dev *rtwdev)
+{
+ struct dentry *debugfs_topdir;
+
+ rtwdev->debugfs = kmemdup(&rtw_debugfs_templ, sizeof(rtw_debugfs_templ),
+ GFP_KERNEL);
+ if (!rtwdev->debugfs)
+ return;
+
+ debugfs_topdir = debugfs_create_dir("rtw88",
+ rtwdev->hw->wiphy->debugfsdir);
+
+ rtw_debugfs_add_basic(rtwdev, debugfs_topdir);
+ rtw_debugfs_add_sec0(rtwdev, debugfs_topdir);
+ rtw_debugfs_add_sec1(rtwdev, debugfs_topdir);
+}
+
+void rtw_debugfs_deinit(struct rtw_dev *rtwdev)
+{
+ kfree(rtwdev->debugfs);
+}
#endif /* CONFIG_RTW88_DEBUGFS */
#ifdef CONFIG_RTW88_DEBUG
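
The debugfs rework above replaces dozens of static rtw_debug_priv_* objects with one const template struct that is kmemdup()'d per device, so each rtw_dev gets its own priv entries (the statics were shared, so a second device would overwrite the first one's rtwdev back-pointer). A standalone userspace model of the pattern, with malloc()/memcpy() standing in for kmemdup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct priv {
        int data;               /* set up in the template */
        void *owner;            /* per-device, filled in after the copy */
};

struct table {
        struct priv a;
        struct priv b;
};

/* One read-only template shared by every device ... */
static const struct table templ = {
        .a = { .data = 1 },
        .b = { .data = 2 },
};

struct device_ctx {
        struct table *tbl;
};

static int device_init(struct device_ctx *dev)
{
        /* ... and one mutable copy per device, so two devices no
         * longer fight over the same static objects.
         */
        dev->tbl = malloc(sizeof(templ));
        if (!dev->tbl)
                return -1;
        memcpy(dev->tbl, &templ, sizeof(templ));
        dev->tbl->a.owner = dev;
        dev->tbl->b.owner = dev;
        return 0;
}

int main(void)
{
        struct device_ctx d0, d1;

        if (device_init(&d0) || device_init(&d1))
                return 1;
        /* each device now points at its own copy */
        printf("%d %d\n", d0.tbl->a.data, d0.tbl != d1.tbl);
        free(d0.tbl);
        free(d1.tbl);
        return 0;
}
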
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index eb69006c463e..6570e84d8d24 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -25,6 +25,7 @@ enum rtw_debug_mask {
RTW_DBG_HW_SCAN = 0x00010000,
RTW_DBG_STATE = 0x00020000,
RTW_DBG_SDIO = 0x00040000,
+ RTW_DBG_USB = 0x00080000,
RTW_DBG_UNEXP = 0x80000000,
RTW_DBG_ALL = 0xffffffff
@@ -33,11 +34,13 @@ enum rtw_debug_mask {
#ifdef CONFIG_RTW88_DEBUGFS
void rtw_debugfs_init(struct rtw_dev *rtwdev);
+void rtw_debugfs_deinit(struct rtw_dev *rtwdev);
void rtw_debugfs_get_simple_phy_info(struct seq_file *m);
#else
static inline void rtw_debugfs_init(struct rtw_dev *rtwdev) {}
+static inline void rtw_debugfs_deinit(struct rtw_dev *rtwdev) {}
#endif /* CONFIG_RTW88_DEBUGFS */
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index ab7d414d0ba6..b9b0114e253b 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -1468,10 +1468,12 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
val |= BIT_ENSWBCN >> 8;
rtw_write8(rtwdev, REG_CR + 1, val);
- val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
- bckp[1] = val;
- val &= ~(BIT_EN_BCNQ_DL >> 16);
- rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) {
+ val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
+ bckp[1] = val;
+ val &= ~(BIT_EN_BCNQ_DL >> 16);
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);
+ }
ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);
if (ret) {
@@ -1496,7 +1498,8 @@ restore:
rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
rsvd_pg_head | BIT_BCN_VALID_V1);
- rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
rtw_write8(rtwdev, REG_CR + 1, bckp[0]);
return ret;
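
Note the symmetry requirement in the two fw.c hunks: the REG_FWHW_TXQ_CTRL backup is now taken only on PCIe, so the restore must be gated on the same condition, otherwise a USB or SDIO device would write back an uninitialized bckp[1]. Condensed into one illustrative function (names mirror the patch, not the full driver code):

static int foo_write_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
        u8 bckp = 0;
        int ret;

        /* back up and modify only where the quirk applies ... */
        if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) {
                bckp = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
                rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2,
                           bckp & ~(BIT_EN_BCNQ_DL >> 16));
        }

        ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);

        /* ... and restore under exactly the same condition */
        if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
                rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp);

        return ret;
}
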
diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
index 830d7532f2a3..96aeda26014e 100644
--- a/drivers/net/wireless/realtek/rtw88/hci.h
+++ b/drivers/net/wireless/realtek/rtw88/hci.h
@@ -18,6 +18,7 @@ struct rtw_hci_ops {
void (*deep_ps)(struct rtw_dev *rtwdev, bool enter);
void (*link_ps)(struct rtw_dev *rtwdev, bool enter);
void (*interface_cfg)(struct rtw_dev *rtwdev);
+ void (*dynamic_rx_agg)(struct rtw_dev *rtwdev, bool enable);
int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
@@ -72,6 +73,12 @@ static inline void rtw_hci_interface_cfg(struct rtw_dev *rtwdev)
rtwdev->hci.ops->interface_cfg(rtwdev);
}
+static inline void rtw_hci_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
+{
+ if (rtwdev->hci.ops->dynamic_rx_agg)
+ rtwdev->hci.ops->dynamic_rx_agg(rtwdev, enable);
+}
+
static inline int
rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
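
dynamic_rx_agg is an optional HCI op: the inline wrapper above turns a NULL pointer into a no-op, so only buses that implement it (USB, further down) have to provide it. A standalone model of that convention:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct bus_ops {
        void (*dynamic_rx_agg)(void *dev, bool enable);
};

static void usb_dynamic_rx_agg(void *dev, bool enable)
{
        printf("usb: rx agg %s\n", enable ? "on" : "off");
}

static const struct bus_ops usb_ops = { .dynamic_rx_agg = usb_dynamic_rx_agg };
static const struct bus_ops pci_ops = { .dynamic_rx_agg = NULL };

static void hci_dynamic_rx_agg(const struct bus_ops *ops, void *dev, bool en)
{
        if (ops->dynamic_rx_agg)        /* optional op: NULL means no-op */
                ops->dynamic_rx_agg(dev, en);
}

int main(void)
{
        hci_dynamic_rx_agg(&usb_ops, NULL, true);       /* prints */
        hci_dynamic_rx_agg(&pci_ops, NULL, true);       /* silently skipped */
        return 0;
}
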
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 63326b352738..b39e90fb66b4 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -167,6 +167,12 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
+ rtwvif->mac_id = rtw_acquire_macid(rtwdev);
+ if (rtwvif->mac_id >= RTW_MAX_MAC_ID_NUM) {
+ mutex_unlock(&rtwdev->mutex);
+ return -ENOSPC;
+ }
+
port = find_first_zero_bit(rtwdev->hw_port, RTW_PORT_NUM);
if (port >= RTW_PORT_NUM) {
mutex_unlock(&rtwdev->mutex);
@@ -214,7 +220,8 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
- rtw_dbg(rtwdev, RTW_DBG_STATE, "start vif %pM on port %d\n", vif->addr, rtwvif->port);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "start vif %pM mac_id %d on port %d\n",
+ vif->addr, rtwvif->mac_id, rtwvif->port);
return 0;
}
@@ -225,7 +232,8 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
u32 config = 0;
- rtw_dbg(rtwdev, RTW_DBG_STATE, "stop vif %pM on port %d\n", vif->addr, rtwvif->port);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "stop vif %pM mac_id %d on port %d\n",
+ vif->addr, rtwvif->mac_id, rtwvif->port);
mutex_lock(&rtwdev->mutex);
@@ -242,6 +250,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
config |= PORT_SET_BCN_CTRL;
rtw_vif_port_config(rtwdev, rtwvif, config);
clear_bit(rtwvif->port, rtwdev->hw_port);
+ rtw_release_macid(rtwdev, rtwvif->mac_id);
rtw_recalc_lps(rtwdev, NULL);
mutex_unlock(&rtwdev->mutex);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 7ab7a988b123..bbdef38c7e34 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -212,6 +212,7 @@ static void rtw_watch_dog_work(struct work_struct *work)
struct rtw_traffic_stats *stats = &rtwdev->stats;
struct rtw_watch_dog_iter_data data = {};
bool busy_traffic = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
+ u32 tx_unicast_mbps, rx_unicast_mbps;
bool ps_active;
mutex_lock(&rtwdev->mutex);
@@ -236,10 +237,11 @@ static void rtw_watch_dog_work(struct work_struct *work)
else
ps_active = false;
- ewma_tp_add(&stats->tx_ewma_tp,
- (u32)(stats->tx_unicast >> RTW_TP_SHIFT));
- ewma_tp_add(&stats->rx_ewma_tp,
- (u32)(stats->rx_unicast >> RTW_TP_SHIFT));
+ tx_unicast_mbps = stats->tx_unicast >> RTW_TP_SHIFT;
+ rx_unicast_mbps = stats->rx_unicast >> RTW_TP_SHIFT;
+
+ ewma_tp_add(&stats->tx_ewma_tp, tx_unicast_mbps);
+ ewma_tp_add(&stats->rx_ewma_tp, rx_unicast_mbps);
stats->tx_throughput = ewma_tp_read(&stats->tx_ewma_tp);
stats->rx_throughput = ewma_tp_read(&stats->rx_ewma_tp);
@@ -259,6 +261,9 @@ static void rtw_watch_dog_work(struct work_struct *work)
rtw_phy_dynamic_mechanism(rtwdev);
+ rtw_hci_dynamic_rx_agg(rtwdev,
+ tx_unicast_mbps >= 1 || rx_unicast_mbps >= 1);
+
data.rtwdev = rtwdev;
/* rtw_iterate_vifs internally uses an atomic iterator which is needed
* to avoid taking local->iflist_mtx mutex
@@ -306,17 +311,6 @@ static void rtw_ips_work(struct work_struct *work)
mutex_unlock(&rtwdev->mutex);
}
-static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
-{
- unsigned long mac_id;
-
- mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
- if (mac_id < RTW_MAX_MAC_ID_NUM)
- set_bit(mac_id, rtwdev->mac_id_map);
-
- return mac_id;
-}
-
static void rtw_sta_rc_work(struct work_struct *work)
{
struct rtw_sta_info *si = container_of(work, struct rtw_sta_info,
@@ -335,12 +329,14 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
int i;
- si->mac_id = rtw_acquire_macid(rtwdev);
- if (si->mac_id >= RTW_MAX_MAC_ID_NUM)
- return -ENOSPC;
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ si->mac_id = rtwvif->mac_id;
+ } else {
+ si->mac_id = rtw_acquire_macid(rtwdev);
+ if (si->mac_id >= RTW_MAX_MAC_ID_NUM)
+ return -ENOSPC;
+ }
- if (vif->type == NL80211_IFTYPE_STATION && vif->cfg.assoc == 0)
- rtwvif->mac_id = si->mac_id;
si->rtwdev = rtwdev;
si->sta = sta;
si->vif = vif;
@@ -365,11 +361,13 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
bool fw_exist)
{
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ struct ieee80211_vif *vif = si->vif;
int i;
cancel_work_sync(&si->rc_work);
- rtw_release_macid(rtwdev, si->mac_id);
+ if (vif->type != NL80211_IFTYPE_STATION)
+ rtw_release_macid(rtwdev, si->mac_id);
if (fw_exist)
rtw_fw_media_status_report(rtwdev, si->mac_id, false);
@@ -609,6 +607,8 @@ static void rtw_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
rtw_bf_disassoc(rtwdev, vif, NULL);
rtw_vif_assoc_changed(rtwvif, NULL);
rtw_txq_cleanup(rtwdev, vif->txq);
+
+ rtw_release_macid(rtwdev, rtwvif->mac_id);
}
void rtw_fw_recovery(struct rtw_dev *rtwdev)
@@ -1313,20 +1313,21 @@ static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev)
{
const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fw_state *fw;
+ int ret = 0;
fw = &rtwdev->fw;
wait_for_completion(&fw->completion);
if (!fw->firmware)
- return -EINVAL;
+ ret = -EINVAL;
if (chip->wow_fw_name) {
fw = &rtwdev->wow_fw;
wait_for_completion(&fw->completion);
if (!fw->firmware)
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+ return ret;
}
static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev,
@@ -2005,7 +2006,7 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
efuse->ext_pa_2g = efuse->pa_type_2g & BIT(4) ? 1 : 0;
efuse->ext_lna_2g = efuse->lna_type_2g & BIT(3) ? 1 : 0;
efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0;
- efuse->ext_lna_2g = efuse->lna_type_5g & BIT(3) ? 1 : 0;
+ efuse->ext_lna_5g = efuse->lna_type_5g & BIT(3) ? 1 : 0;
if (!is_valid_ether_addr(efuse->addr)) {
eth_random_addr(efuse->addr);
@@ -2133,7 +2134,6 @@ int rtw_core_init(struct rtw_dev *rtwdev)
rtwdev->sec.total_cam_num = 32;
rtwdev->hal.current_channel = 1;
rtwdev->dm_info.fix_rate = U8_MAX;
- set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map);
rtw_stats_init(rtwdev);
@@ -2299,6 +2299,7 @@ void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
ieee80211_unregister_hw(hw);
rtw_unset_supported_band(hw, chip);
+ rtw_debugfs_deinit(rtwdev);
}
EXPORT_SYMBOL(rtw_unregister_hw);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 49a3fd4fb7dc..945117afe143 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -50,6 +50,7 @@ extern const struct ieee80211_ops rtw_ops;
#define RTW_MAX_CHANNEL_NUM_5G 49
struct rtw_dev;
+struct rtw_debugfs;
enum rtw_hci_type {
RTW_HCI_TYPE_PCIE,
@@ -622,6 +623,7 @@ struct rtw_rx_pkt_stat {
bool crc_err;
bool decrypted;
bool is_c2h;
+ bool channel_invalid;
s32 signal_power;
u16 pkt_len;
@@ -740,7 +742,6 @@ struct rtw_txq {
unsigned long flags;
};
-#define RTW_BC_MC_MACID 1
DECLARE_EWMA(rssi, 10, 16);
struct rtw_sta_info {
@@ -803,7 +804,7 @@ struct rtw_bf_info {
struct rtw_vif {
enum rtw_net_type net_type;
u16 aid;
- u8 mac_id; /* for STA mode only */
+ u8 mac_id;
u8 mac_addr[ETH_ALEN];
u8 bssid[ETH_ALEN];
u8 port;
@@ -1785,6 +1786,8 @@ struct rtw_efuse {
bool share_ant;
u8 bt_setting;
+ u8 usb_mode_switch;
+
struct {
u8 hci;
u8 bw;
@@ -2051,7 +2054,7 @@ struct rtw_dev {
bool beacon_loss;
struct completion lps_leave_check;
- struct dentry *debugfs;
+ struct rtw_debugfs *debugfs;
u8 sta_cnt;
u32 rts_threshold;
@@ -2127,6 +2130,17 @@ static inline bool rtw_chip_has_tx_stbc(struct rtw_dev *rtwdev)
return rtwdev->chip->tx_stbc;
}
+static inline u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
+{
+ unsigned long mac_id;
+
+ mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
+ if (mac_id < RTW_MAX_MAC_ID_NUM)
+ set_bit(mac_id, rtwdev->mac_id_map);
+
+ return mac_id;
+}
+
static inline void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id)
{
clear_bit(mac_id, rtwdev->mac_id_map);
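
With mac_id moving into struct rtw_vif for all interface types, the allocator helpers become inlines in main.h. They are a plain bitmap allocator: find the first clear bit, set it, hand the index out; release clears it again. A standalone userspace model (a single unsigned long stands in for the kernel bitmap and find_first_zero_bit()):

#include <stdio.h>

#define MAX_MAC_ID_NUM 32               /* illustrative size */

static unsigned long mac_id_map;        /* one bit per mac_id */

static unsigned int acquire_macid(void)
{
        unsigned int id;

        for (id = 0; id < MAX_MAC_ID_NUM; id++) {
                if (!(mac_id_map & (1UL << id))) {
                        mac_id_map |= 1UL << id;
                        return id;
                }
        }
        return MAX_MAC_ID_NUM;          /* caller must treat as -ENOSPC */
}

static void release_macid(unsigned int id)
{
        mac_id_map &= ~(1UL << id);
}

int main(void)
{
        unsigned int a = acquire_macid();       /* 0 */
        unsigned int b = acquire_macid();       /* 1 */

        release_macid(a);
        printf("%u %u %u\n", a, b, acquire_macid());    /* 0 1 0 */
        return 0;
}
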
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index a5b9d6c7be37..0b9b8807af2c 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -1088,6 +1088,7 @@ static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
/* remove rx_desc */
skb_pull(new, pkt_offset);
+ rtw_update_rx_freq_for_invalid(rtwdev, new, &rx_status, &pkt_stat);
rtw_rx_stats(rtwdev, pkt_stat.vif, new);
memcpy(new->cb, &rx_status, sizeof(rx_status));
ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
@@ -1600,6 +1601,7 @@ static struct rtw_hci_ops rtw_pci_ops = {
.deep_ps = rtw_pci_deep_ps,
.link_ps = rtw_pci_link_ps,
.interface_cfg = rtw_pci_interface_cfg,
+ .dynamic_rx_agg = NULL,
.read8 = rtw_pci_read8,
.read16 = rtw_pci_read16,
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index 02ef9a77316b..4d9b8668e8b0 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -15,6 +15,7 @@
#define BIT_WLOCK_1C_B6 BIT(5)
#define REG_SYS_PW_CTRL 0x0004
#define BIT_PFM_WOWL BIT(3)
+#define BIT_APFM_OFFMAC BIT(9)
#define REG_SYS_CLK_CTRL 0x0008
#define BIT_CPU_CLK_EN BIT(14)
@@ -133,6 +134,14 @@
#define REG_PMC_DBG_CTRL1 0xa8
#define BITS_PMC_BT_IQK_STS GENMASK(22, 21)
+#define REG_PAD_CTRL2 0x00C4
+#define BIT_RSM_EN_V1 BIT(16)
+#define BIT_NO_PDN_CHIPOFF_V1 BIT(17)
+#define BIT_MASK_USB23_SW_MODE_V1 GENMASK(19, 18)
+#define BIT_USB3_USB2_TRANSITION BIT(20)
+#define BIT_USB_MODE_U2 1
+#define BIT_USB_MODE_U3 2
+
#define REG_EFUSE_ACCESS 0x00CF
#define EFUSE_ACCESS_ON 0x69
#define EFUSE_ACCESS_OFF 0x00
@@ -313,6 +322,12 @@
#define REG_RXDMA_DPR 0x028C
#define REG_RXDMA_MODE 0x0290
#define BIT_DMA_MODE BIT(1)
+#define BIT_DMA_BURST_CNT GENMASK(3, 2)
+#define BIT_DMA_BURST_SIZE GENMASK(5, 4)
+#define BIT_DMA_BURST_SIZE_64 2
+#define BIT_DMA_BURST_SIZE_512 1
+#define BIT_DMA_BURST_SIZE_1024 0
+
#define REG_RXPKTNUM 0x02B0
#define REG_INT_MIG 0x0304
@@ -568,6 +583,8 @@
#define BIT_WL_SECURITY_CLK BIT(15)
#define BIT_DDMA_EN BIT(8)
+#define REG_SW_MDIO 0x10C0
+
#define REG_H2C_PKT_READADDR 0x10D0
#define REG_H2C_PKT_WRITEADDR 0x10D4
#define REG_FW_DBG6 0x10F8
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
index e2c7d9f87683..a019f4085e73 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
@@ -31,8 +31,6 @@ static const struct usb_device_id rtw_8821cu_id_table[] = {
.driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82b, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82c, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331d, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* D-Link */
{ USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xc811, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 2456ff242818..6edb17aea90e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -46,6 +46,7 @@ static int rtw8822b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
map = (struct rtw8822b_efuse *)log_map;
+ efuse->usb_mode_switch = u8_get_bits(map->usb_mode, BIT(7));
efuse->rfe_option = map->rfe_option;
efuse->rf_board_option = map->rf_board_option;
efuse->crystal_cap = map->xtal_k;
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
index 2dc3a6660f06..cf85e63966a1 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
@@ -72,7 +72,9 @@ struct rtw8822bs_efuse {
struct rtw8822b_efuse {
__le16 rtl_id;
- u8 res0[0x0e];
+ u8 res0[4];
+ u8 usb_mode;
+ u8 res1[0x09];
/* power index for four RF paths */
struct rtw_txpwr_idx txpwr_idx_table[4];
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 62376d1cca22..1dbe1cdbc3fd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -49,6 +49,7 @@ static int rtw8822c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
map = (struct rtw8822c_efuse *)log_map;
+ efuse->usb_mode_switch = u8_get_bits(map->usb_mode, BIT(7));
efuse->rfe_option = map->rfe_option;
efuse->rf_board_option = map->rf_board_option;
efuse->crystal_cap = map->xtal_k & XCAP_MASK;
@@ -2575,9 +2576,10 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
rx_power[RF_PATH_B] -= 110;
channel = GET_PHY_STAT_P0_CHANNEL(phy_status);
- if (channel == 0)
- channel = rtwdev->hal.current_channel;
- rtw_set_rx_freq_band(pkt_stat, channel);
+ if (channel != 0)
+ rtw_set_rx_freq_band(pkt_stat, channel);
+ else
+ pkt_stat->channel_invalid = true;
pkt_stat->rx_power[RF_PATH_A] = rx_power[RF_PATH_A];
pkt_stat->rx_power[RF_PATH_B] = rx_power[RF_PATH_B];
@@ -2611,12 +2613,14 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
else
rxsc = GET_PHY_STAT_P1_HT_RXSC(phy_status);
- if (rxsc >= 9 && rxsc <= 12)
+ if (rxsc == 0)
+ bw = rtwdev->hal.current_band_width;
+ else if (rxsc >= 1 && rxsc <= 8)
+ bw = RTW_CHANNEL_WIDTH_20;
+ else if (rxsc >= 9 && rxsc <= 12)
bw = RTW_CHANNEL_WIDTH_40;
- else if (rxsc >= 13)
- bw = RTW_CHANNEL_WIDTH_80;
else
- bw = RTW_CHANNEL_WIDTH_20;
+ bw = RTW_CHANNEL_WIDTH_80;
channel = GET_PHY_STAT_P1_CHANNEL(phy_status);
rtw_set_rx_freq_band(pkt_stat, channel);
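
The query_phy_status_page1() hunk fixes the RX sub-channel (rxsc) decoding: rxsc 0 means the frame spans the full current bandwidth, 1-8 select a 20 MHz sub-channel, 9-12 a 40 MHz one, and anything larger 80 MHz; the old code mapped rxsc 0 to 20 MHz. A standalone version of the corrected mapping:

#include <stdio.h>

enum chan_width { WIDTH_20, WIDTH_40, WIDTH_80 };

static enum chan_width rxsc_to_bw(unsigned int rxsc,
                                  enum chan_width current_bw)
{
        if (rxsc == 0)
                return current_bw;      /* full bandwidth of the link */
        if (rxsc <= 8)
                return WIDTH_20;
        if (rxsc <= 12)
                return WIDTH_40;
        return WIDTH_80;
}

int main(void)
{
        /* the old code mapped rxsc == 0 to 20 MHz even on an 80 MHz link */
        printf("%d %d %d\n",
               rxsc_to_bw(0, WIDTH_80),         /* 2: the fixed case */
               rxsc_to_bw(5, WIDTH_80),         /* 0 */
               rxsc_to_bw(10, WIDTH_80));       /* 1 */
        return 0;
}
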
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index 1bc0e7f5d6bb..e2b383d633cd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -59,16 +59,18 @@ struct rtw8822ce_efuse {
struct rtw8822c_efuse {
__le16 rtl_id;
- u8 res0[0x0e];
+ u8 res0[4];
+ u8 usb_mode;
+ u8 res1[0x09];
/* power index for four RF paths */
struct rtw_txpwr_idx txpwr_idx_table[4];
u8 channel_plan; /* 0xb8 */
u8 xtal_k;
- u8 res1;
+ u8 res2;
u8 iqk_lck;
- u8 res2[5]; /* 0xbc */
+ u8 res3[5]; /* 0xbc */
u8 rf_board_option;
u8 rf_feature_option;
u8 rf_bt_setting;
@@ -80,21 +82,21 @@ struct rtw8822c_efuse {
u8 rf_antenna_option; /* 0xc9 */
u8 rfe_option;
u8 country_code[2];
- u8 res3[3];
+ u8 res4[3];
u8 path_a_thermal; /* 0xd0 */
u8 path_b_thermal;
- u8 res4[2];
+ u8 res5[2];
u8 rx_gain_gap_2g_ofdm;
- u8 res5;
- u8 rx_gain_gap_2g_cck;
u8 res6;
- u8 rx_gain_gap_5gl;
+ u8 rx_gain_gap_2g_cck;
u8 res7;
- u8 rx_gain_gap_5gm;
+ u8 rx_gain_gap_5gl;
u8 res8;
- u8 rx_gain_gap_5gh;
+ u8 rx_gain_gap_5gm;
u8 res9;
- u8 res10[0x42];
+ u8 rx_gain_gap_5gh;
+ u8 res10;
+ u8 res11[0x42];
union {
struct rtw8822ce_efuse e;
struct rtw8822cu_efuse u;
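
Carving usb_mode out of the reserved block only works if every later field keeps its absolute offset, which holds here because 2 + 4 + 1 + 9 equals the original 2 + 14 bytes. A standalone sketch of how such a layout change can be checked at compile time (the structs are truncated to the leading fields; names follow the patch):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct efuse_old {
        uint16_t rtl_id;
        uint8_t res0[0x0e];
        uint8_t txpwr[4];
} __attribute__((packed));

struct efuse_new {
        uint16_t rtl_id;
        uint8_t res0[4];
        uint8_t usb_mode;       /* bit 7: USB 2/3 switching allowed */
        uint8_t res1[0x09];
        uint8_t txpwr[4];
} __attribute__((packed));

_Static_assert(offsetof(struct efuse_old, txpwr) ==
               offsetof(struct efuse_new, txpwr),
               "usb_mode must not shift the rest of the efuse map");

int main(void)
{
        printf("txpwr offset: %zu\n", offsetof(struct efuse_new, txpwr));
        return 0;
}
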
diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c
index 84aedabdf285..66f9419588cf 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.c
+++ b/drivers/net/wireless/realtek/rtw88/rx.c
@@ -146,6 +146,47 @@ static void rtw_set_rx_freq_by_pktstat(struct rtw_rx_pkt_stat *pkt_stat,
rx_status->band = pkt_stat->band;
}
+void rtw_update_rx_freq_from_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ int channel = rtwdev->hal.current_channel;
+ size_t hdr_len, ielen;
+ int channel_number;
+ u8 *variable;
+
+ if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
+ goto fill_rx_status;
+
+ if (ieee80211_is_beacon(mgmt->frame_control)) {
+ variable = mgmt->u.beacon.variable;
+ hdr_len = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ } else if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ variable = mgmt->u.probe_resp.variable;
+ hdr_len = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
+ } else {
+ goto fill_rx_status;
+ }
+
+ if (skb->len > hdr_len)
+ ielen = skb->len - hdr_len;
+ else
+ goto fill_rx_status;
+
+ channel_number = cfg80211_get_ies_channel_number(variable, ielen,
+ NL80211_BAND_2GHZ);
+ if (channel_number != -1)
+ channel = channel_number;
+
+fill_rx_status:
+ rtw_set_rx_freq_band(pkt_stat, channel);
+ rtw_set_rx_freq_by_pktstat(pkt_stat, rx_status);
+}
+EXPORT_SYMBOL(rtw_update_rx_freq_from_ie);
+
void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
struct rtw_rx_pkt_stat *pkt_stat,
struct ieee80211_hdr *hdr,
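
rtw_update_rx_freq_from_ie() handles the case where the PHY reports channel 0 during a scan: it recovers the channel from the received beacon or probe response itself via cfg80211_get_ies_channel_number(). For the 2 GHz band that boils down to finding the DS Parameter Set element (ID 3) in the tagged IEs, roughly as in this standalone model:

#include <stddef.h>
#include <stdio.h>

static int ies_channel_number(const unsigned char *ies, size_t len)
{
        size_t pos = 0;

        while (pos + 2 <= len) {
                unsigned char id = ies[pos];
                unsigned char elen = ies[pos + 1];

                if (pos + 2 + elen > len)
                        break;          /* truncated element */
                if (id == 3 && elen == 1)
                        return ies[pos + 2];
                pos += 2 + elen;
        }
        return -1;
}

int main(void)
{
        /* SSID "ab" followed by DS Params announcing channel 6 */
        const unsigned char ies[] = { 0x00, 0x02, 'a', 'b', 0x03, 0x01, 6 };

        printf("channel: %d\n", ies_channel_number(ies, sizeof(ies)));
        return 0;
}
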
diff --git a/drivers/net/wireless/realtek/rtw88/rx.h b/drivers/net/wireless/realtek/rtw88/rx.h
index d3668c4efc24..9f0019112987 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.h
+++ b/drivers/net/wireless/realtek/rtw88/rx.h
@@ -41,7 +41,7 @@ enum rtw_rx_desc_enc {
#define GET_RX_DESC_TSFL(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x05), GENMASK(31, 0))
#define GET_RX_DESC_BW(rxdesc) \
- (le32_get_bits(*((__le32 *)(rxdesc) + 0x04), GENMASK(31, 24)))
+ (le32_get_bits(*((__le32 *)(rxdesc) + 0x04), GENMASK(5, 4)))
void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct sk_buff *skb);
@@ -50,5 +50,18 @@ void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
struct ieee80211_hdr *hdr,
struct ieee80211_rx_status *rx_status,
u8 *phy_status);
+void rtw_update_rx_freq_from_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status,
+ struct rtw_rx_pkt_stat *pkt_stat);
+
+static inline
+void rtw_update_rx_freq_for_invalid(struct rtw_dev *rtwdev, struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ if (pkt_stat->channel_invalid)
+ rtw_update_rx_freq_from_ie(rtwdev, skb, rx_status, pkt_stat);
+}
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
index 0cae5746f540..21d0754dd7f6 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.c
+++ b/drivers/net/wireless/realtek/rtw88/sdio.c
@@ -948,6 +948,7 @@ static void rtw_sdio_rx_skb(struct rtw_dev *rtwdev, struct sk_buff *skb,
skb_put(skb, pkt_stat->pkt_len);
skb_reserve(skb, pkt_offset);
+ rtw_update_rx_freq_for_invalid(rtwdev, skb, rx_status, pkt_stat);
rtw_rx_stats(rtwdev, pkt_stat->vif, skb);
ieee80211_rx_irqsafe(rtwdev->hw, skb);
@@ -1156,6 +1157,7 @@ static struct rtw_hci_ops rtw_sdio_ops = {
.deep_ps = rtw_sdio_deep_ps,
.link_ps = rtw_sdio_link_ps,
.interface_cfg = rtw_sdio_interface_cfg,
+ .dynamic_rx_agg = NULL,
.read8 = rtw_sdio_read8,
.read16 = rtw_sdio_read16,
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index c02ac673be32..dae7ca148865 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -46,7 +46,8 @@ void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
le32_encode_bits(pkt_info->ls, RTW_TX_DESC_W0_LS) |
le32_encode_bits(pkt_info->dis_qselseq, RTW_TX_DESC_W0_DISQSELSEQ);
- tx_desc->w1 = le32_encode_bits(pkt_info->qsel, RTW_TX_DESC_W1_QSEL) |
+ tx_desc->w1 = le32_encode_bits(pkt_info->mac_id, RTW_TX_DESC_W1_MACID) |
+ le32_encode_bits(pkt_info->qsel, RTW_TX_DESC_W1_QSEL) |
le32_encode_bits(pkt_info->rate_id, RTW_TX_DESC_W1_RATE_ID) |
le32_encode_bits(pkt_info->sec_type, RTW_TX_DESC_W1_SEC_TYPE) |
le32_encode_bits(pkt_info->pkt_offset, RTW_TX_DESC_W1_PKT_OFFSET) |
@@ -401,14 +402,18 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_vif *vif = info->control.vif;
struct rtw_sta_info *si;
- struct ieee80211_vif *vif = NULL;
+ struct rtw_vif *rtwvif;
__le16 fc = hdr->frame_control;
bool bmc;
if (sta) {
si = (struct rtw_sta_info *)sta->drv_priv;
- vif = si->vif;
+ pkt_info->mac_id = si->mac_id;
+ } else if (vif) {
+ rtwvif = (struct rtw_vif *)vif->drv_priv;
+ pkt_info->mac_id = rtwvif->mac_id;
}
if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc))
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index 324189606257..3d544fd7f60f 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -27,6 +27,7 @@ struct rtw_tx_desc {
#define RTW_TX_DESC_W0_BMC BIT(24)
#define RTW_TX_DESC_W0_LS BIT(26)
#define RTW_TX_DESC_W0_DISQSELSEQ BIT(31)
+#define RTW_TX_DESC_W1_MACID GENMASK(7, 0)
#define RTW_TX_DESC_W1_QSEL GENMASK(12, 8)
#define RTW_TX_DESC_W1_RATE_ID GENMASK(20, 16)
#define RTW_TX_DESC_W1_SEC_TYPE GENMASK(23, 22)
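
Writing the MACID into TX descriptor word 1 is what lets the earlier tx.c hunk steer broadcast/multicast frames to the per-vif mac_id. The descriptor fields are packed with GENMASK() plus le32_encode_bits(); a standalone model of that field packing (plain C stand-ins, no endian conversion):

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l) \
        (((~UINT32_C(0)) << (l)) & (~UINT32_C(0) >> (31 - (h))))

#define W1_MACID        GENMASK32(7, 0)
#define W1_QSEL         GENMASK32(12, 8)

/* roughly what u32_encode_bits()/le32_encode_bits() do */
static uint32_t encode_bits(uint32_t val, uint32_t mask)
{
        return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
        uint32_t w1 = encode_bits(5, W1_MACID) | encode_bits(3, W1_QSEL);

        /* mac_id 5 in bits 7:0, qsel 3 in bits 12:8 */
        printf("w1 = 0x%08x\n", w1);    /* 0x00000305 */
        return 0;
}
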
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index a55ca5a24227..e83ab6fb83f5 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -14,6 +14,11 @@
#include "ps.h"
#include "usb.h"
+static bool rtw_switch_usb_mode = true;
+module_param_named(switch_usb_mode, rtw_switch_usb_mode, bool, 0644);
+MODULE_PARM_DESC(switch_usb_mode,
+ "Set to N to disable switching to USB 3 mode to avoid potential interference in the 2.4 GHz band (default: Y)");
+
#define RTW_USB_MAX_RXQ_LEN 512
struct rtw_usb_txcb {
@@ -541,11 +546,12 @@ static void rtw_usb_rx_handler(struct work_struct *work)
struct rtw_usb *rtwusb = container_of(work, struct rtw_usb, rx_work);
struct rtw_dev *rtwdev = rtwusb->rtwdev;
const struct rtw_chip_info *chip = rtwdev->chip;
- struct rtw_rx_pkt_stat pkt_stat;
+ u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
struct ieee80211_rx_status rx_status;
+ u32 pkt_offset, next_pkt, urb_len;
+ struct rtw_rx_pkt_stat pkt_stat;
+ struct sk_buff *next_skb;
struct sk_buff *skb;
- u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
- u32 pkt_offset;
u8 *rx_desc;
int limit;
@@ -554,28 +560,48 @@ static void rtw_usb_rx_handler(struct work_struct *work)
if (!skb)
break;
- rx_desc = skb->data;
- chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat,
- &rx_status);
- pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
- pkt_stat.shift;
-
- if (pkt_stat.is_c2h) {
- skb_put(skb, pkt_stat.pkt_len + pkt_offset);
- rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, skb);
- continue;
- }
-
if (skb_queue_len(&rtwusb->rx_queue) >= RTW_USB_MAX_RXQ_LEN) {
dev_dbg_ratelimited(rtwdev->dev, "failed to get rx_queue, overflow\n");
dev_kfree_skb_any(skb);
continue;
}
- skb_put(skb, pkt_stat.pkt_len);
- skb_reserve(skb, pkt_offset);
- memcpy(skb->cb, &rx_status, sizeof(rx_status));
- ieee80211_rx_irqsafe(rtwdev->hw, skb);
+ urb_len = skb->len;
+
+ do {
+ rx_desc = skb->data;
+ chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat,
+ &rx_status);
+ pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
+ pkt_stat.shift;
+
+ next_pkt = round_up(pkt_stat.pkt_len + pkt_offset, 8);
+
+ if (urb_len >= next_pkt + pkt_desc_sz)
+ next_skb = skb_clone(skb, GFP_KERNEL);
+ else
+ next_skb = NULL;
+
+ if (pkt_stat.is_c2h) {
+ skb_trim(skb, pkt_stat.pkt_len + pkt_offset);
+ rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, skb);
+ } else {
+ skb_pull(skb, pkt_offset);
+ skb_trim(skb, pkt_stat.pkt_len);
+ rtw_update_rx_freq_for_invalid(rtwdev, skb,
+ &rx_status,
+ &pkt_stat);
+ rtw_rx_stats(rtwdev, pkt_stat.vif, skb);
+ memcpy(skb->cb, &rx_status, sizeof(rx_status));
+ ieee80211_rx_irqsafe(rtwdev->hw, skb);
+ }
+
+ skb = next_skb;
+ if (skb)
+ skb_pull(skb, next_pkt);
+
+ urb_len -= next_pkt;
+ } while (skb);
}
}
@@ -619,6 +645,7 @@ static void rtw_usb_read_port_complete(struct urb *urb)
if (skb)
dev_kfree_skb_any(skb);
} else {
+ skb_put(skb, urb->actual_length);
skb_queue_tail(&rtwusb->rx_queue, skb);
queue_work(rtwusb->rxwq, &rtwusb->rx_work);
}
@@ -713,9 +740,69 @@ static void rtw_usb_link_ps(struct rtw_dev *rtwdev, bool enter)
/* empty function for rtw_hci_ops */
}
+static void rtw_usb_init_burst_pkt_len(struct rtw_dev *rtwdev)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ enum usb_device_speed speed = rtwusb->udev->speed;
+ u8 rxdma, burst_size;
+
+ rxdma = BIT_DMA_BURST_CNT | BIT_DMA_MODE;
+
+ if (speed == USB_SPEED_SUPER)
+ burst_size = BIT_DMA_BURST_SIZE_1024;
+ else if (speed == USB_SPEED_HIGH)
+ burst_size = BIT_DMA_BURST_SIZE_512;
+ else
+ burst_size = BIT_DMA_BURST_SIZE_64;
+
+ u8p_replace_bits(&rxdma, burst_size, BIT_DMA_BURST_SIZE);
+
+ rtw_write8(rtwdev, REG_RXDMA_MODE, rxdma);
+ rtw_write16_set(rtwdev, REG_TXDMA_OFFSET_CHK, BIT_DROP_DATA_EN);
+}
+
static void rtw_usb_interface_cfg(struct rtw_dev *rtwdev)
{
- /* empty function for rtw_hci_ops */
+ rtw_usb_init_burst_pkt_len(rtwdev);
+}
+
+static void rtw_usb_dynamic_rx_agg_v1(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 size, timeout;
+ u16 val16;
+
+ rtw_write32_set(rtwdev, REG_RXDMA_AGG_PG_TH, BIT_EN_PRE_CALC);
+ rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN);
+ rtw_write8_clr(rtwdev, REG_RXDMA_AGG_PG_TH + 3, BIT(7));
+
+ if (enable) {
+ size = 0x5;
+ timeout = 0x20;
+ } else {
+ size = 0x0;
+ timeout = 0x1;
+ }
+ val16 = u16_encode_bits(size, BIT_RXDMA_AGG_PG_TH) |
+ u16_encode_bits(timeout, BIT_DMA_AGG_TO_V1);
+
+ rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH, val16);
+}
+
+static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
+{
+ switch (rtwdev->chip->id) {
+ case RTW_CHIP_TYPE_8822C:
+ case RTW_CHIP_TYPE_8822B:
+ case RTW_CHIP_TYPE_8821C:
+ rtw_usb_dynamic_rx_agg_v1(rtwdev, enable);
+ break;
+ case RTW_CHIP_TYPE_8723D:
+ /* Doesn't like aggregation. */
+ break;
+ case RTW_CHIP_TYPE_8703B:
+ /* Likely not found in USB devices. */
+ break;
+ }
}
static struct rtw_hci_ops rtw_usb_ops = {
@@ -727,6 +814,7 @@ static struct rtw_hci_ops rtw_usb_ops = {
.deep_ps = rtw_usb_deep_ps,
.link_ps = rtw_usb_link_ps,
.interface_cfg = rtw_usb_interface_cfg,
+ .dynamic_rx_agg = rtw_usb_dynamic_rx_agg,
.write8 = rtw_usb_write8,
.write16 = rtw_usb_write16,
@@ -841,6 +929,77 @@ static void rtw_usb_intf_deinit(struct rtw_dev *rtwdev,
usb_set_intfdata(intf, NULL);
}
+static int rtw_usb_switch_mode_new(struct rtw_dev *rtwdev)
+{
+ enum usb_device_speed cur_speed;
+ u8 id = rtwdev->chip->id;
+ bool can_switch;
+ u32 pad_ctrl2;
+
+ if (rtw_read8(rtwdev, REG_SYS_CFG2 + 3) == 0x20)
+ cur_speed = USB_SPEED_SUPER;
+ else
+ cur_speed = USB_SPEED_HIGH;
+
+ if (cur_speed == USB_SPEED_SUPER)
+ return 0;
+
+ pad_ctrl2 = rtw_read32(rtwdev, REG_PAD_CTRL2);
+
+ can_switch = !!(pad_ctrl2 & (BIT_MASK_USB23_SW_MODE_V1 |
+ BIT_USB3_USB2_TRANSITION));
+
+ if (!can_switch) {
+ rtw_dbg(rtwdev, RTW_DBG_USB,
+ "Switching to USB 3 mode unsupported by the chip\n");
+ return 0;
+ }
+
+ /* At this point cur_speed is USB_SPEED_HIGH. If we already tried
+ * to switch don't try again - it's a USB 2 port.
+ */
+ if (u32_get_bits(pad_ctrl2, BIT_MASK_USB23_SW_MODE_V1) == BIT_USB_MODE_U3)
+ return 0;
+
+ /* Enable IO wrapper timeout */
+ if (id == RTW_CHIP_TYPE_8822B || id == RTW_CHIP_TYPE_8821C)
+ rtw_write8_clr(rtwdev, REG_SW_MDIO + 3, BIT(0));
+
+ u32p_replace_bits(&pad_ctrl2, BIT_USB_MODE_U3, BIT_MASK_USB23_SW_MODE_V1);
+ pad_ctrl2 |= BIT_RSM_EN_V1;
+
+ rtw_write32(rtwdev, REG_PAD_CTRL2, pad_ctrl2);
+ rtw_write8(rtwdev, REG_PAD_CTRL2 + 1, 4);
+
+ rtw_write16_set(rtwdev, REG_SYS_PW_CTRL, BIT_APFM_OFFMAC);
+ usleep_range(1000, 1001);
+ rtw_write32_set(rtwdev, REG_PAD_CTRL2, BIT_NO_PDN_CHIPOFF_V1);
+
+ return 1;
+}
+
+static int rtw_usb_switch_mode(struct rtw_dev *rtwdev)
+{
+ u8 id = rtwdev->chip->id;
+
+ if (id != RTW_CHIP_TYPE_8822C && id != RTW_CHIP_TYPE_8822B)
+ return 0;
+
+ if (!rtwdev->efuse.usb_mode_switch) {
+ rtw_dbg(rtwdev, RTW_DBG_USB,
+ "Switching to USB 3 mode disabled by chip's efuse\n");
+ return 0;
+ }
+
+ if (!rtw_switch_usb_mode) {
+ rtw_dbg(rtwdev, RTW_DBG_USB,
+ "Switching to USB 3 mode disabled by module parameter\n");
+ return 0;
+ }
+
+ return rtw_usb_switch_mode_new(rtwdev);
+}
+
int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct rtw_dev *rtwdev;
@@ -896,6 +1055,14 @@ int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto err_destroy_rxwq;
}
+ ret = rtw_usb_switch_mode(rtwdev);
+ if (ret) {
+ /* Not a fail, but we do need to skip rtw_register_hw. */
+ rtw_dbg(rtwdev, RTW_DBG_USB, "switching to USB 3 mode\n");
+ ret = 0;
+ goto err_destroy_rxwq;
+ }
+
ret = rtw_register_hw(rtwdev, rtwdev->hw);
if (ret) {
rtw_err(rtwdev, "failed to register hw\n");
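
The largest behavioural change in usb.c is the RX loop: with RX aggregation enabled via the new dynamic_rx_agg op, one URB can carry several packets back to back, each padded so the next RX descriptor starts on an 8-byte boundary, with skb_clone() giving every packet its own skb. A standalone model of the buffer walk (a 4-byte length header stands in for the real RX descriptor):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DESC_SZ         4
#define ROUND_UP8(x)    (((x) + 7) & ~7u)

static void rx_handler(const uint8_t *urb, uint32_t urb_len)
{
        while (urb_len >= DESC_SZ) {
                uint32_t pkt_len;

                memcpy(&pkt_len, urb, sizeof(pkt_len)); /* "descriptor" */

                uint32_t next_pkt = ROUND_UP8(DESC_SZ + pkt_len);

                printf("packet of %u bytes: %.*s\n",
                       pkt_len, (int)pkt_len, urb + DESC_SZ);

                if (urb_len < next_pkt + DESC_SZ)
                        break;          /* no complete packet follows */
                urb += next_pkt;
                urb_len -= next_pkt;
        }
}

int main(void)
{
        /* two packets: "hi" (2 bytes) then "kernel" (6 bytes) */
        uint8_t urb[DESC_SZ + 4 + DESC_SZ + 8] = { 0 };
        uint32_t len;

        len = 2;
        memcpy(urb, &len, 4);
        memcpy(urb + 4, "hi", 2);
        len = 6;
        memcpy(urb + 8, &len, 4);
        memcpy(urb + 12, "kernel", 6);

        rx_handler(urb, sizeof(urb));
        return 0;
}
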
diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig
index 3c9f864805b1..d2a3361669d7 100644
--- a/drivers/net/wireless/realtek/rtw89/Kconfig
+++ b/drivers/net/wireless/realtek/rtw89/Kconfig
@@ -12,6 +12,7 @@ if RTW89
config RTW89_CORE
tristate
+ select WANT_DEV_COREDUMP
config RTW89_PCI
tristate
@@ -28,6 +29,9 @@ config RTW89_8852B_COMMON
config RTW89_8852B
tristate
+config RTW89_8852BT
+ tristate
+
config RTW89_8852C
tristate
@@ -68,6 +72,18 @@ config RTW89_8852BE
802.11ax PCIe wireless network (Wi-Fi 6) adapter
+config RTW89_8852BTE
+ tristate "Realtek 8852BE-VT PCI wireless network (Wi-Fi 6) adapter"
+ depends on PCI
+ select RTW89_CORE
+ select RTW89_PCI
+ select RTW89_8852BT
+ select RTW89_8852B_COMMON
+ help
+ Selecting this option will enable support for the 8852BE-VT chipset
+
+ 802.11ax PCIe wireless network (Wi-Fi 6) adapter
+
config RTW89_8852CE
tristate "Realtek 8852CE PCI wireless network (Wi-Fi 6E) adapter"
depends on PCI
diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile
index 1f1050a7a89d..c751013e811e 100644
--- a/drivers/net/wireless/realtek/rtw89/Makefile
+++ b/drivers/net/wireless/realtek/rtw89/Makefile
@@ -52,6 +52,14 @@ rtw89_8852b-objs := rtw8852b.o \
obj-$(CONFIG_RTW89_8852BE) += rtw89_8852be.o
rtw89_8852be-objs := rtw8852be.o
+obj-$(CONFIG_RTW89_8852BT) += rtw89_8852bt.o
+rtw89_8852bt-objs := rtw8852bt.o \
+ rtw8852bt_rfk.o \
+ rtw8852bt_rfk_table.o
+
+obj-$(CONFIG_RTW89_8852BTE) += rtw89_8852bte.o
+rtw89_8852bte-objs := rtw8852bte.o
+
obj-$(CONFIG_RTW89_8852C) += rtw89_8852c.o
rtw89_8852c-objs := rtw8852c.o \
rtw8852c_table.o \
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 4557c6e035a9..4476fc7e53db 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -384,20 +384,24 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
break;
case WLAN_CIPHER_SUITE_CCMP:
hw_key_type = RTW89_SEC_KEY_TYPE_CCMP128;
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ if (!chip->hw_mgmt_tx_encrypt)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
break;
case WLAN_CIPHER_SUITE_CCMP_256:
hw_key_type = RTW89_SEC_KEY_TYPE_CCMP256;
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ if (!chip->hw_mgmt_tx_encrypt)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
ext_key = true;
break;
case WLAN_CIPHER_SUITE_GCMP:
hw_key_type = RTW89_SEC_KEY_TYPE_GCMP128;
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ if (!chip->hw_mgmt_tx_encrypt)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
break;
case WLAN_CIPHER_SUITE_GCMP_256:
hw_key_type = RTW89_SEC_KEY_TYPE_GCMP256;
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ if (!chip->hw_mgmt_tx_encrypt)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
ext_key = true;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index 7f90d93dcdc0..7070c85e2c28 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -124,12 +124,12 @@ void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan,
}
bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx,
+ enum rtw89_chanctx_idx idx,
const struct rtw89_chan *new)
{
struct rtw89_hal *hal = &rtwdev->hal;
- struct rtw89_chan *chan = &hal->sub[idx].chan;
- struct rtw89_chan_rcd *rcd = &hal->sub[idx].rcd;
+ struct rtw89_chan *chan = &hal->chanctx[idx].chan;
+ struct rtw89_chan_rcd *rcd = &hal->chanctx[idx].rcd;
bool band_changed;
rcd->prev_primary_channel = chan->primary_channel;
@@ -153,7 +153,7 @@ int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev,
lockdep_assert_held(&rtwdev->mutex);
- for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY) {
+ for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_CHANCTX) {
chan = rtw89_chan_get(rtwdev, idx);
ret = iterator(chan, data);
if (ret)
@@ -164,36 +164,36 @@ int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev,
}
static void __rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx,
+ enum rtw89_chanctx_idx idx,
const struct cfg80211_chan_def *chandef,
bool from_stack)
{
struct rtw89_hal *hal = &rtwdev->hal;
- hal->sub[idx].chandef = *chandef;
+ hal->chanctx[idx].chandef = *chandef;
if (from_stack)
set_bit(idx, hal->entity_map);
}
void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx,
+ enum rtw89_chanctx_idx idx,
const struct cfg80211_chan_def *chandef)
{
__rtw89_config_entity_chandef(rtwdev, idx, chandef, true);
}
void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx,
+ enum rtw89_chanctx_idx idx,
const struct cfg80211_chan_def *chandef)
{
struct rtw89_hal *hal = &rtwdev->hal;
- enum rtw89_sub_entity_idx cur;
+ enum rtw89_chanctx_idx cur;
if (chandef) {
- cur = atomic_cmpxchg(&hal->roc_entity_idx,
- RTW89_SUB_ENTITY_IDLE, idx);
- if (cur != RTW89_SUB_ENTITY_IDLE) {
+ cur = atomic_cmpxchg(&hal->roc_chanctx_idx,
+ RTW89_CHANCTX_IDLE, idx);
+ if (cur != RTW89_CHANCTX_IDLE) {
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
"ROC still processing on entity %d\n", idx);
return;
@@ -201,12 +201,12 @@ void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
hal->roc_chandef = *chandef;
} else {
- cur = atomic_cmpxchg(&hal->roc_entity_idx, idx,
- RTW89_SUB_ENTITY_IDLE);
+ cur = atomic_cmpxchg(&hal->roc_chanctx_idx, idx,
+ RTW89_CHANCTX_IDLE);
if (cur == idx)
return;
- if (cur == RTW89_SUB_ENTITY_IDLE)
+ if (cur == RTW89_CHANCTX_IDLE)
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
"ROC already finished on entity %d\n", idx);
else
@@ -220,7 +220,7 @@ static void rtw89_config_default_chandef(struct rtw89_dev *rtwdev)
struct cfg80211_chan_def chandef = {0};
rtw89_get_default_chandef(&chandef);
- __rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0, &chandef, false);
+ __rtw89_config_entity_chandef(rtwdev, RTW89_CHANCTX_0, &chandef, false);
}
void rtw89_entity_init(struct rtw89_dev *rtwdev)
@@ -228,9 +228,9 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev)
struct rtw89_hal *hal = &rtwdev->hal;
hal->entity_pause = false;
- bitmap_zero(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ bitmap_zero(hal->entity_map, NUM_OF_RTW89_CHANCTX);
bitmap_zero(hal->changes, NUM_OF_RTW89_CHANCTX_CHANGES);
- atomic_set(&hal->roc_entity_idx, RTW89_SUB_ENTITY_IDLE);
+ atomic_set(&hal->roc_chanctx_idx, RTW89_CHANCTX_IDLE);
rtw89_config_default_chandef(rtwdev);
}
@@ -242,8 +242,8 @@ static void rtw89_entity_calculate_weight(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif;
int idx;
- for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY) {
- cfg = hal->sub[idx].cfg;
+ for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_CHANCTX) {
+ cfg = hal->chanctx[idx].cfg;
if (!cfg) {
/* doesn't run with chanctx ops; one channel at most */
w->active_chanctxs = 1;
@@ -262,7 +262,7 @@ static void rtw89_entity_calculate_weight(struct rtw89_dev *rtwdev,
enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
{
- DECLARE_BITMAP(recalc_map, NUM_OF_RTW89_SUB_ENTITY) = {};
+ DECLARE_BITMAP(recalc_map, NUM_OF_RTW89_CHANCTX) = {};
struct rtw89_hal *hal = &rtwdev->hal;
const struct cfg80211_chan_def *chandef;
struct rtw89_entity_weight w = {};
@@ -272,23 +272,23 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
lockdep_assert_held(&rtwdev->mutex);
- bitmap_copy(recalc_map, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ bitmap_copy(recalc_map, hal->entity_map, NUM_OF_RTW89_CHANCTX);
rtw89_entity_calculate_weight(rtwdev, &w);
switch (w.active_chanctxs) {
default:
rtw89_warn(rtwdev, "unknown ent chanctxs weight: %d\n",
w.active_chanctxs);
- bitmap_zero(recalc_map, NUM_OF_RTW89_SUB_ENTITY);
+ bitmap_zero(recalc_map, NUM_OF_RTW89_CHANCTX);
fallthrough;
case 0:
rtw89_config_default_chandef(rtwdev);
- set_bit(RTW89_SUB_ENTITY_0, recalc_map);
+ set_bit(RTW89_CHANCTX_0, recalc_map);
fallthrough;
case 1:
mode = RTW89_ENTITY_MODE_SCC;
break;
- case 2 ... NUM_OF_RTW89_SUB_ENTITY:
+ case 2 ... NUM_OF_RTW89_CHANCTX:
if (w.active_roles != NUM_OF_RTW89_MCC_ROLES) {
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"unhandled ent: %d chanctxs %d roles\n",
@@ -304,7 +304,7 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
break;
}
- for_each_set_bit(idx, recalc_map, NUM_OF_RTW89_SUB_ENTITY) {
+ for_each_set_bit(idx, recalc_map, NUM_OF_RTW89_CHANCTX) {
chandef = rtw89_chandef_get(rtwdev, idx);
rtw89_get_channel_params(chandef, &chan);
if (chan.channel == 0) {
@@ -650,7 +650,7 @@ static int rtw89_mcc_fill_role(struct rtw89_dev *rtwdev,
role->duration = role->beacon_interval / 2;
- chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
role->is_2ghz = chan->band_type == RTW89_BAND_2G;
role->is_go = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_GO;
role->is_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
@@ -678,10 +678,10 @@ static void rtw89_mcc_fill_bt_role(struct rtw89_dev *rtwdev)
}
struct rtw89_mcc_fill_role_selector {
- struct rtw89_vif *bind_vif[NUM_OF_RTW89_SUB_ENTITY];
+ struct rtw89_vif *bind_vif[NUM_OF_RTW89_CHANCTX];
};
-static_assert((u8)NUM_OF_RTW89_SUB_ENTITY >= NUM_OF_RTW89_MCC_ROLES);
+static_assert((u8)NUM_OF_RTW89_CHANCTX >= NUM_OF_RTW89_MCC_ROLES);
static int rtw89_mcc_fill_role_iterator(struct rtw89_dev *rtwdev,
struct rtw89_mcc_role *mcc_role,
@@ -719,14 +719,14 @@ static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev)
if (!rtwvif->chanctx_assigned)
continue;
- if (sel.bind_vif[rtwvif->sub_entity_idx]) {
+ if (sel.bind_vif[rtwvif->chanctx_idx]) {
rtw89_warn(rtwdev,
"MCC skip extra vif <macid %d> on chanctx[%d]\n",
- rtwvif->mac_id, rtwvif->sub_entity_idx);
+ rtwvif->mac_id, rtwvif->chanctx_idx);
continue;
}
- sel.bind_vif[rtwvif->sub_entity_idx] = rtwvif;
+ sel.bind_vif[rtwvif->chanctx_idx] = rtwvif;
}
ret = rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_fill_role_iterator, &sel);
@@ -1390,7 +1390,7 @@ static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *ro
const struct rtw89_chan *chan;
int ret;
- chan = rtw89_chan_get(rtwdev, role->rtwvif->sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, role->rtwvif->chanctx_idx);
req.central_ch_seg0 = chan->channel;
req.primary_ch = chan->primary_channel;
req.bandwidth = chan->band_width;
@@ -1448,7 +1448,7 @@ void __mrc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *role,
slot_arg->duration = role->duration;
slot_arg->role_num = 1;
- chan = rtw89_chan_get(rtwdev, role->rtwvif->sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, role->rtwvif->chanctx_idx);
slot_arg->roles[0].role_type = RTW89_H2C_MRC_ROLE_WIFI;
slot_arg->roles[0].is_master = role == ref;
@@ -1934,22 +1934,53 @@ static int rtw89_mcc_start(struct rtw89_dev *rtwdev)
return 0;
}
+struct rtw89_mcc_stop_sel {
+ u8 mac_id;
+ u8 slot_idx;
+};
+
+static void rtw89_mcc_stop_sel_fill(struct rtw89_mcc_stop_sel *sel,
+ const struct rtw89_mcc_role *mcc_role)
+{
+ sel->mac_id = mcc_role->rtwvif->mac_id;
+ sel->slot_idx = mcc_role->slot_idx;
+}
+
+static int rtw89_mcc_stop_sel_iterator(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_role *mcc_role,
+ unsigned int ordered_idx,
+ void *data)
+{
+ struct rtw89_mcc_stop_sel *sel = data;
+
+ if (!mcc_role->rtwvif->chanctx_assigned)
+ return 0;
+
+ rtw89_mcc_stop_sel_fill(sel, mcc_role);
+ return 1; /* break iteration */
+}
+
static void rtw89_mcc_stop(struct rtw89_dev *rtwdev)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_stop_sel sel;
int ret;
- rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC stop\n");
+ /* by default, stop at ref */
+ rtw89_mcc_stop_sel_fill(&sel, ref);
+ rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_stop_sel_iterator, &sel);
+
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC stop at <macid %d>\n", sel.mac_id);
if (rtw89_concurrent_via_mrc(rtwdev)) {
- ret = rtw89_fw_h2c_mrc_del(rtwdev, mcc->group);
+ ret = rtw89_fw_h2c_mrc_del(rtwdev, mcc->group, sel.slot_idx);
if (ret)
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MRC h2c failed to trigger del: %d\n", ret);
} else {
ret = rtw89_fw_h2c_stop_mcc(rtwdev, mcc->group,
- ref->rtwvif->mac_id, true);
+ sel.mac_id, true);
if (ret)
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MCC h2c failed to trigger stop: %d\n", ret);
@@ -2339,9 +2370,9 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev)
rtw89_queue_chanctx_work(rtwdev);
}
-static void rtw89_swap_sub_entity(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx1,
- enum rtw89_sub_entity_idx idx2)
+static void rtw89_swap_chanctx(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_idx idx1,
+ enum rtw89_chanctx_idx idx2)
{
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_vif *rtwvif;
@@ -2350,25 +2381,25 @@ static void rtw89_swap_sub_entity(struct rtw89_dev *rtwdev,
if (idx1 == idx2)
return;
- hal->sub[idx1].cfg->idx = idx2;
- hal->sub[idx2].cfg->idx = idx1;
+ hal->chanctx[idx1].cfg->idx = idx2;
+ hal->chanctx[idx2].cfg->idx = idx1;
- swap(hal->sub[idx1], hal->sub[idx2]);
+ swap(hal->chanctx[idx1], hal->chanctx[idx2]);
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
if (!rtwvif->chanctx_assigned)
continue;
- if (rtwvif->sub_entity_idx == idx1)
- rtwvif->sub_entity_idx = idx2;
- else if (rtwvif->sub_entity_idx == idx2)
- rtwvif->sub_entity_idx = idx1;
+ if (rtwvif->chanctx_idx == idx1)
+ rtwvif->chanctx_idx = idx2;
+ else if (rtwvif->chanctx_idx == idx2)
+ rtwvif->chanctx_idx = idx1;
}
- cur = atomic_read(&hal->roc_entity_idx);
+ cur = atomic_read(&hal->roc_chanctx_idx);
if (cur == idx1)
- atomic_set(&hal->roc_entity_idx, idx2);
+ atomic_set(&hal->roc_chanctx_idx, idx2);
else if (cur == idx2)
- atomic_set(&hal->roc_entity_idx, idx1);
+ atomic_set(&hal->roc_chanctx_idx, idx1);
}
int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
@@ -2379,14 +2410,14 @@ int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
const struct rtw89_chip_info *chip = rtwdev->chip;
u8 idx;
- idx = find_first_zero_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ idx = find_first_zero_bit(hal->entity_map, NUM_OF_RTW89_CHANCTX);
if (idx >= chip->support_chanctx_num)
return -ENOENT;
rtw89_config_entity_chandef(rtwdev, idx, &ctx->def);
cfg->idx = idx;
cfg->ref_count = 0;
- hal->sub[idx].cfg = cfg;
+ hal->chanctx[idx].cfg = cfg;
return 0;
}
@@ -2419,19 +2450,19 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
struct rtw89_entity_weight w = {};
- rtwvif->sub_entity_idx = cfg->idx;
+ rtwvif->chanctx_idx = cfg->idx;
rtwvif->chanctx_assigned = true;
cfg->ref_count++;
- if (cfg->idx == RTW89_SUB_ENTITY_0)
+ if (cfg->idx == RTW89_CHANCTX_0)
goto out;
rtw89_entity_calculate_weight(rtwdev, &w);
if (w.active_chanctxs != 1)
goto out;
- /* put the first active chanctx at RTW89_SUB_ENTITY_0 */
- rtw89_swap_sub_entity(rtwdev, cfg->idx, RTW89_SUB_ENTITY_0);
+ /* put the first active chanctx at RTW89_CHANCTX_0 */
+ rtw89_swap_chanctx(rtwdev, cfg->idx, RTW89_CHANCTX_0);
out:
return rtw89_set_channel(rtwdev);
@@ -2443,47 +2474,60 @@ void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
{
struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
struct rtw89_hal *hal = &rtwdev->hal;
- struct rtw89_entity_weight w = {};
- enum rtw89_sub_entity_idx roll;
+ enum rtw89_chanctx_idx roll;
enum rtw89_entity_mode cur;
+ enum rtw89_entity_mode new;
+ int ret;
- rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
+ rtwvif->chanctx_idx = RTW89_CHANCTX_0;
rtwvif->chanctx_assigned = false;
cfg->ref_count--;
if (cfg->ref_count != 0)
goto out;
- if (cfg->idx != RTW89_SUB_ENTITY_0)
+ if (cfg->idx != RTW89_CHANCTX_0)
goto out;
- roll = find_next_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY,
+ roll = find_next_bit(hal->entity_map, NUM_OF_RTW89_CHANCTX,
cfg->idx + 1);
/* Follow rtw89_config_default_chandef() when rtw89_entity_recalc(). */
- if (roll == NUM_OF_RTW89_SUB_ENTITY)
+ if (roll == NUM_OF_RTW89_CHANCTX)
goto out;
- /* RTW89_SUB_ENTITY_0 is going to release, and another exists.
- * Make another roll down to RTW89_SUB_ENTITY_0 to replace.
+ /* RTW89_CHANCTX_0 is going to release, and another exists.
+ * Make another roll down to RTW89_CHANCTX_0 to replace.
*/
- rtw89_swap_sub_entity(rtwdev, cfg->idx, roll);
+ rtw89_swap_chanctx(rtwdev, cfg->idx, roll);
out:
- rtw89_entity_calculate_weight(rtwdev, &w);
+ if (!hal->entity_pause) {
+ cur = rtw89_get_entity_mode(rtwdev);
+ switch (cur) {
+ case RTW89_ENTITY_MODE_MCC:
+ rtw89_mcc_stop(rtwdev);
+ break;
+ default:
+ break;
+ }
+ }
+
+ ret = rtw89_set_channel(rtwdev);
+ if (ret)
+ return;
+
+ if (hal->entity_pause)
+ return;
- cur = rtw89_get_entity_mode(rtwdev);
- switch (cur) {
+ new = rtw89_get_entity_mode(rtwdev);
+ switch (new) {
case RTW89_ENTITY_MODE_MCC:
- /* If still multi-roles, re-plan MCC for chanctx changes.
- * Otherwise, just stop MCC.
- */
- rtw89_mcc_stop(rtwdev);
- if (w.active_roles == NUM_OF_RTW89_MCC_ROLES)
- rtw89_mcc_start(rtwdev);
+ /* re-plan MCC for chanctx changes. */
+ ret = rtw89_mcc_start(rtwdev);
+ if (ret)
+ rtw89_warn(rtwdev, "failed to start MCC: %d\n", ret);
break;
default:
break;
}
-
- rtw89_set_channel(rtwdev);
}
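
The rtw89_config_roc_chandef() change above preserves the driver's single-owner rule for remain-on-channel: roc_chanctx_idx acts as a lock word that is claimed with cmpxchg(IDLE -> idx) and released with cmpxchg(idx -> IDLE), so a second ROC request and a stale release are both detected and only logged. A minimal standalone C11 sketch of the same claim/release idiom (the names and the demo are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

#define CTX_IDLE (-1)

static atomic_int roc_owner = CTX_IDLE;

/* claim the ROC slot for @idx; fails if another context owns it */
static int roc_claim(int idx)
{
	int expected = CTX_IDLE;

	return atomic_compare_exchange_strong(&roc_owner, &expected, idx);
}

/* release the ROC slot; only the current owner's release takes effect */
static void roc_release(int idx)
{
	int expected = idx;

	if (!atomic_compare_exchange_strong(&roc_owner, &expected, CTX_IDLE))
		printf("release by %d ignored, owner is %d\n", idx, expected);
}

int main(void)
{
	printf("claim 0: %d\n", roc_claim(0));	/* 1: claimed */
	printf("claim 1: %d\n", roc_claim(1));	/* 0: still busy */
	roc_release(1);				/* ignored: not the owner */
	roc_release(0);				/* releases */
	printf("claim 1: %d\n", roc_claim(1));	/* 1: claimed */
	return 0;
}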
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index 5278ff8c513b..c6d31984e575 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -76,17 +76,17 @@ static inline void rtw89_set_entity_mode(struct rtw89_dev *rtwdev,
void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan,
enum rtw89_band band, enum rtw89_bandwidth bandwidth);
bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx,
+ enum rtw89_chanctx_idx idx,
const struct rtw89_chan *new);
int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev,
int (*iterator)(const struct rtw89_chan *chan,
void *data),
void *data);
void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx,
+ enum rtw89_chanctx_idx idx,
const struct cfg80211_chan_def *chandef);
void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx,
+ enum rtw89_chanctx_idx idx,
const struct cfg80211_chan_def *chandef);
void rtw89_entity_init(struct rtw89_dev *rtwdev);
enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev);
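
rtw89_swap_chanctx() above makes explicit an invariant the old rtw89_swap_sub_entity() already relied on: when two slots of the hal->chanctx[] table are exchanged, every external reference held by index (each cfg->idx, each vif's chanctx_idx, the atomic ROC owner) must be remapped in the same pass, or it silently names the other context. A freestanding sketch of that swap-and-remap pattern, with made-up types standing in for the driver structures:

#include <stdio.h>

#define NUM_CTX 2
#define NUM_REF 4

struct ctx { int payload; };

static struct ctx table[NUM_CTX] = { { .payload = 100 }, { .payload = 200 } };
static int ref_idx[NUM_REF] = { 0, 1, 0, 1 };	/* references held by index */

static void swap_ctx(int a, int b)
{
	struct ctx tmp;
	int i;

	if (a == b)
		return;

	tmp = table[a];
	table[a] = table[b];
	table[b] = tmp;

	/* remap every external index; a stale one now points at the wrong slot */
	for (i = 0; i < NUM_REF; i++) {
		if (ref_idx[i] == a)
			ref_idx[i] = b;
		else if (ref_idx[i] == b)
			ref_idx[i] = a;
	}
}

int main(void)
{
	int i;

	swap_ctx(0, 1);
	for (i = 0; i < NUM_REF; i++)	/* each still resolves to its payload */
		printf("ref %d -> %d\n", i, table[ref_idx[i]].payload);
	return 0;
}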
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index 24929ef534e0..df51b29142aa 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -129,6 +129,13 @@ static const u32 cxtbl[] = {
static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
/* firmware version must be in decreasing order for each chip */
+ {RTL8852BT, RTW89_FW_VER_CODE(0, 29, 90, 0),
+ .fcxbtcrpt = 7, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
+ .fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
+ .fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7,
+ .fwlrole = 7, .frptmap = 3, .fcxctrl = 7, .fcxinit = 7,
+ .fwevntrptl = 1, .drvinfo_type = 1, .info_buf = 1800, .max_role_num = 6,
+ },
{RTL8922A, RTW89_FW_VER_CODE(0, 35, 8, 0),
.fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
.fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
@@ -1351,6 +1358,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pfinfo = &pfwinfo->rpt_ctrl.finfo.v8;
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v8);
break;
+ } else if (ver->fcxbtcrpt == 7) {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v7);
+ break;
} else {
goto err;
}
@@ -1655,6 +1666,38 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pfwinfo->event[BTF_EVNT_RPT]);
dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
+ } else if (ver->fcxbtcrpt == 7) {
+ prpt->v7 = pfwinfo->rpt_ctrl.finfo.v7;
+ pfwinfo->rpt_en_map = le32_to_cpu(prpt->v7.rpt_info.en);
+ wl->ver_info.fw_coex = le32_to_cpu(prpt->v7.rpt_info.cx_ver);
+ wl->ver_info.fw = le32_to_cpu(prpt->v7.rpt_info.fw_ver);
+
+ for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ memcpy(&dm->gnt.band[i], &prpt->v7.gnt_val[i][0],
+ sizeof(dm->gnt.band[i]));
+
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_TX] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_HI_TX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_RX] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_HI_RX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_TX] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_LO_TX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_RX] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_LO_RX_V105]);
+
+ val1 = le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_POLLUTED_V105]);
+ if (val1 > btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW])
+ val1 -= btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW]; /* diff */
+
+ btc->cx.cnt_bt[BTC_BCNT_POLUT_DIFF] = val1;
+ btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_POLLUTED_V105]);
+
+ val1 = pfwinfo->event[BTF_EVNT_RPT];
+ _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_HANG, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_RPT_HANG, val1);
+ _chk_btc_err(rtwdev, BTC_DCNT_WL_FW_VER_MATCH, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_BTTX_HANG, 0);
} else if (ver->fcxbtcrpt == 8) {
prpt->v8 = pfwinfo->rpt_ctrl.finfo.v8;
pfwinfo->rpt_en_map = le32_to_cpu(prpt->v8.rpt_info.en);
@@ -2397,7 +2440,7 @@ static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
if (val == fwinfo->rpt_en_map)
return;
- if (btc->ver->fcxbtcrpt == 8) {
+ if (btc->ver->fcxbtcrpt == 7 || btc->ver->fcxbtcrpt == 8) {
r.v8.type = SET_REPORT_EN;
r.v8.fver = btc->ver->fcxbtcrpt;
r.v8.len = sizeof(r.v8.map);
@@ -2567,6 +2610,10 @@ static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type)
rtw89_fw_h2c_cxdrv_role_v1(rtwdev, type);
else if (ver->fwlrole == 2)
rtw89_fw_h2c_cxdrv_role_v2(rtwdev, type);
+ else if (ver->fwlrole == 7)
+ rtw89_fw_h2c_cxdrv_role_v7(rtwdev, type);
+ else if (ver->fwlrole == 8)
+ rtw89_fw_h2c_cxdrv_role_v8(rtwdev, type);
break;
case CXDRVINFO_CTRL:
if (ver->drvinfo_type == 1)
@@ -2748,7 +2795,7 @@ static void _set_gnt_v1(struct rtw89_dev *rtwdev, u8 phy_map,
rtw89_mac_cfg_gnt_v2(rtwdev, &dm->gnt);
}
-#define BTC_TDMA_WLROLE_MAX 2
+#define BTC_TDMA_WLROLE_MAX 3
static void _set_bt_ignore_wlan_act(struct rtw89_dev *rtwdev, u8 enable)
{
@@ -2998,10 +3045,12 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_wl_active_role *r;
struct rtw89_btc_wl_active_role_v1 *r1;
struct rtw89_btc_wl_active_role_v2 *r2;
+ struct rtw89_btc_wl_active_role_v7 *r7;
struct rtw89_btc_wl_rlink *rlink;
u8 en = 0, i, ch = 0, bw = 0;
u8 mode, connect_cnt;
@@ -3018,6 +3067,9 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
} else if (ver->fwlrole == 2) {
mode = wl_rinfo_v2->link_mode;
connect_cnt = wl_rinfo_v2->connect_cnt;
+ } else if (ver->fwlrole == 7) {
+ mode = wl_rinfo_v7->link_mode;
+ connect_cnt = wl_rinfo_v7->connect_cnt;
} else if (ver->fwlrole == 8) {
mode = wl_rinfo_v8->link_mode;
connect_cnt = wl_rinfo_v8->connect_cnt;
@@ -3036,6 +3088,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
r = &wl_rinfo->active_role[i];
r1 = &wl_rinfo_v1->active_role_v1[i];
r2 = &wl_rinfo_v2->active_role_v2[i];
+ r7 = &wl_rinfo_v7->active_role[i];
rlink = &wl_rinfo_v8->rlink[i][0];
if (ver->fwlrole == 0 &&
@@ -3056,6 +3109,12 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
ch = r2->ch;
bw = r2->bw;
break;
+ } else if (ver->fwlrole == 7 &&
+ (r7->role == RTW89_WIFI_ROLE_P2P_GO ||
+ r7->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
+ ch = r7->ch;
+ bw = r7->bw;
+ break;
} else if (ver->fwlrole == 8 &&
(rlink->role == RTW89_WIFI_ROLE_P2P_GO ||
rlink->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
@@ -3071,6 +3130,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
r = &wl_rinfo->active_role[i];
r1 = &wl_rinfo_v1->active_role_v1[i];
r2 = &wl_rinfo_v2->active_role_v2[i];
+ r7 = &wl_rinfo_v7->active_role[i];
rlink = &wl_rinfo_v8->rlink[i][0];
if (ver->fwlrole == 0 &&
@@ -3088,6 +3148,11 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
ch = r2->ch;
bw = r2->bw;
break;
+ } else if (ver->fwlrole == 7 &&
+ r7->connected && r7->band == RTW89_BAND_2G) {
+ ch = r7->ch;
+ bw = r7->bw;
+ break;
} else if (ver->fwlrole == 8 &&
rlink->connected && rlink->rf_band == RTW89_BAND_2G) {
ch = rlink->ch;
@@ -3146,6 +3211,7 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
struct rtw89_btc_bt_hid_desc *hid = &bt_linfo->hid_desc;
@@ -3164,6 +3230,8 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
connect_cnt = wl_rinfo_v1->connect_cnt;
else if (ver->fwlrole == 2)
connect_cnt = wl_rinfo_v2->connect_cnt;
+ else if (ver->fwlrole == 7)
+ connect_cnt = wl_rinfo_v7->connect_cnt;
else if (ver->fwlrole == 8)
connect_cnt = wl_rinfo_v8->connect_cnt;
@@ -4082,6 +4150,8 @@ static void _set_ant_v0(struct rtw89_dev *rtwdev, bool force_exec,
dbcc_chg = wl->role_info_v1.dbcc_chg;
else if (btc->ver->fwlrole == 2)
dbcc_chg = wl->role_info_v2.dbcc_chg;
+ else if (btc->ver->fwlrole == 7)
+ dbcc_chg = wl->role_info_v7.dbcc_chg;
else if (btc->ver->fwlrole == 8)
dbcc_chg = wl->role_info_v8.dbcc_chg;
@@ -4754,6 +4824,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_wl_role_info *wl_rinfo_v0 = &wl->role_info;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
@@ -4775,6 +4846,8 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
wl_rinfo.link_mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
wl_rinfo.link_mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ wl_rinfo.link_mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
wl_rinfo.link_mode = wl_rinfo_v8->link_mode;
else
@@ -4790,6 +4863,8 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
wl_rinfo.dbcc_2g_phy = wl_rinfo_v1->dbcc_2g_phy;
} else if (ver->fwlrole == 2) {
wl_rinfo.dbcc_2g_phy = wl_rinfo_v2->dbcc_2g_phy;
+ } else if (ver->fwlrole == 7) {
+ wl_rinfo.dbcc_2g_phy = wl_rinfo_v7->dbcc_2g_phy;
} else if (ver->fwlrole == 8) {
wl_rinfo.dbcc_2g_phy = wl_rinfo_v8->dbcc_2g_phy;
} else {
@@ -4835,37 +4910,56 @@ static void _set_wl_preagc_ctrl(struct rtw89_dev *rtwdev)
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
- struct rtw89_btc_wl_role_info_v2 *wl_rinfo = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v2 *rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &wl->role_info_v7;
+ struct rtw89_btc_wl_role_info_v8 *rinfo_v8 = &wl->role_info_v8;
const struct rtw89_chip_info *chip = rtwdev->chip;
- const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_dm *dm = &btc->dm;
- u8 is_preagc, val;
+ u8 is_preagc, val, link_mode, dbcc_2g_phy;
+ u8 role_ver = rtwdev->btc.ver->fwlrole;
+ bool dbcc_en;
if (btc->manual_ctrl)
return;
- if (wl_rinfo->link_mode == BTC_WLINK_25G_MCC)
+ if (role_ver == 2) {
+ dbcc_en = rinfo_v2->dbcc_en;
+ link_mode = rinfo_v2->link_mode;
+ dbcc_2g_phy = rinfo_v2->dbcc_2g_phy;
+ } else if (role_ver == 7) {
+ dbcc_en = rinfo_v7->dbcc_en;
+ link_mode = rinfo_v7->link_mode;
+ dbcc_2g_phy = rinfo_v7->dbcc_2g_phy;
+ } else if (role_ver == 8) {
+ dbcc_en = rinfo_v8->dbcc_en;
+ link_mode = rinfo_v8->link_mode;
+ dbcc_2g_phy = rinfo_v8->dbcc_2g_phy;
+ } else {
+ return;
+ }
+
+ if (link_mode == BTC_WLINK_25G_MCC) {
is_preagc = BTC_PREAGC_BB_FWCTRL;
- else if (!(bt->run_patch_code && bt->enable.now))
+ } else if (!(bt->run_patch_code && bt->enable.now)) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (wl_rinfo->link_mode == BTC_WLINK_5G)
+ } else if (link_mode == BTC_WLINK_5G) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (wl_rinfo->link_mode == BTC_WLINK_NOLINK ||
- btc->cx.bt.link_info.profile_cnt.now == 0)
+ } else if (link_mode == BTC_WLINK_NOLINK ||
+ btc->cx.bt.link_info.profile_cnt.now == 0) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (dm->tdma_now.type != CXTDMA_OFF &&
+ } else if (dm->tdma_now.type != CXTDMA_OFF &&
!bt_linfo->hfp_desc.exist &&
!bt_linfo->hid_desc.exist &&
- dm->fddt_train == BTC_FDDT_DISABLE)
+ dm->fddt_train == BTC_FDDT_DISABLE) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (ver->fwlrole == 2 && wl_rinfo->dbcc_en &&
- wl_rinfo->dbcc_2g_phy != RTW89_PHY_1)
+ } else if (dbcc_en && (dbcc_2g_phy != RTW89_PHY_1)) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (btc->ant_type == BTC_ANT_SHARED)
+ } else if (btc->ant_type == BTC_ANT_SHARED) {
is_preagc = BTC_PREAGC_DISABLE;
- else
+ } else {
is_preagc = BTC_PREAGC_ENABLE;
+ }
if (dm->wl_pre_agc_rb != dm->wl_pre_agc &&
dm->wl_pre_agc_rb != BTC_PREAGC_NOTFOUND) {
@@ -4968,6 +5062,7 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_txtime_data data = {.rtwdev = rtwdev};
u8 mode, igno_bt, tx_retry;
@@ -4984,6 +5079,8 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
@@ -5043,6 +5140,7 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
bool bt_hi_lna_rx = false;
@@ -5054,6 +5152,8 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
@@ -5359,15 +5459,26 @@ static void _action_wl_2g_scc_v2(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_dm *dm = &btc->dm;
- struct rtw89_btc_wl_role_info_v2 *wl_rinfo = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v2 *rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &wl->role_info_v7;
+ u32 dur, mrole_type, mrole_noa_duration;
u16 policy_type = BTC_CXP_OFF_BT;
- u32 dur;
+
+ if (btc->ver->fwlrole == 2) {
+ mrole_type = rinfo_v2->mrole_type;
+ mrole_noa_duration = rinfo_v2->mrole_noa_duration;
+ } else if (btc->ver->fwlrole == 7) {
+ mrole_type = rinfo_v7->mrole_type;
+ mrole_noa_duration = rinfo_v7->mrole_noa_duration;
+ } else {
+ return;
+ }
if (btc->ant_type == BTC_ANT_DEDICATED) {
policy_type = BTC_CXP_OFF_EQ0;
} else {
/* shared-antenna */
- switch (wl_rinfo->mrole_type) {
+ switch (mrole_type) {
case BTC_WLMROLE_STA_GC:
dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION;
dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_P2P_CLIENT;
@@ -5385,7 +5496,7 @@ static void _action_wl_2g_scc_v2(struct rtw89_dev *rtwdev)
case BTC_WLMROLE_STA_GO_NOA:
dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION;
dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_NONE;
- dur = wl_rinfo->mrole_noa_duration;
+ dur = mrole_noa_duration;
if (wl->status.map._4way) {
dm->wl_scc.ebt_null = 0;
@@ -5567,6 +5678,14 @@ _update_rssi_state(struct rtw89_dev *rtwdev, u8 pre_state, u8 rssi, u8 thresh)
return next_state;
}
+static void _wl_req_mac(struct rtw89_dev *rtwdev, u8 mac)
+{
+ if (mac == RTW89_MAC_0)
+ rtw89_write32_clr(rtwdev, R_AX_BTC_CFG, B_AX_WL_SRC);
+ else
+ rtw89_write32_set(rtwdev, R_AX_BTC_CFG, B_AX_WL_SRC);
+}
+
static
void _update_dbcc_band(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
@@ -6065,12 +6184,20 @@ static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch,
u8 *phy, u8 *role, u8 *dbcc_2g_phy)
{
struct rtw89_btc_wl_info *wl = &rtwdev->btc.cx.wl;
- struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8;
+ struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &wl->role_info_v7;
+ struct rtw89_btc_wl_role_info_v8 *rinfo_v8 = &wl->role_info_v8;
bool is_2g_ch_exist = false, is_multi_role_in_2g_phy = false;
- u8 j, k, dbcc_2g_cid, dbcc_2g_cid2;
+ u8 j, k, dbcc_2g_cid, dbcc_2g_cid2, connect_cnt;
+
+ if (rtwdev->btc.ver->fwlrole == 7)
+ connect_cnt = rinfo_v7->connect_cnt;
+ else if (rtwdev->btc.ver->fwlrole == 8)
+ connect_cnt = rinfo_v8->connect_cnt;
+ else
+ return BTC_WLINK_NOLINK;
/* find out the 2G-PHY by connect-id ->ch */
- for (j = 0; j < wl_rinfo->connect_cnt; j++) {
+ for (j = 0; j < connect_cnt; j++) {
if (ch[j].center_ch <= 14) {
is_2g_ch_exist = true;
break;
@@ -6085,11 +6212,11 @@ static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch,
*dbcc_2g_phy = phy[dbcc_2g_cid];
/* connect_cnt <= 2 */
- if (wl_rinfo->connect_cnt < BTC_TDMA_WLROLE_MAX)
+ if (connect_cnt < BTC_TDMA_WLROLE_MAX)
return (_get_role_link_mode((role[dbcc_2g_cid])));
/* find the other-port in the 2G-PHY, ex: PHY-0:6G, PHY1: mcc/scc */
- for (k = 0; k < wl_rinfo->connect_cnt; k++) {
+ for (k = 0; k < connect_cnt; k++) {
if (k == dbcc_2g_cid)
continue;
@@ -6116,29 +6243,54 @@ static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch,
static void _update_role_link_mode(struct rtw89_dev *rtwdev,
bool client_joined, u32 noa)
{
- struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &rtwdev->btc.cx.wl.role_info_v8;
+ struct rtw89_btc_wl_role_info_v8 *rinfo_v8 = &rtwdev->btc.cx.wl.role_info_v8;
+ struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &rtwdev->btc.cx.wl.role_info_v7;
+ u8 role_ver = rtwdev->btc.ver->fwlrole;
u32 type = BTC_WLMROLE_NONE, dur = 0;
- u32 wl_role = wl_rinfo->role_map;
+ u8 link_mode, connect_cnt;
+ u32 wl_role;
+
+ if (role_ver == 7) {
+ wl_role = rinfo_v7->role_map;
+ link_mode = rinfo_v7->link_mode;
+ connect_cnt = rinfo_v7->connect_cnt;
+ } else if (role_ver == 8) {
+ wl_role = rinfo_v8->role_map;
+ link_mode = rinfo_v8->link_mode;
+ connect_cnt = rinfo_v8->connect_cnt;
+ } else {
+ return;
+ }
/* if no client_joined, don't care P2P-GO/AP role */
if (((wl_role & BIT(RTW89_WIFI_ROLE_P2P_GO)) ||
(wl_role & BIT(RTW89_WIFI_ROLE_AP))) && !client_joined) {
- if (wl_rinfo->link_mode == BTC_WLINK_2G_SCC) {
- wl_rinfo->link_mode = BTC_WLINK_2G_STA;
- wl_rinfo->connect_cnt--;
- } else if (wl_rinfo->link_mode == BTC_WLINK_2G_GO ||
- wl_rinfo->link_mode == BTC_WLINK_2G_AP) {
- wl_rinfo->link_mode = BTC_WLINK_NOLINK;
- wl_rinfo->connect_cnt--;
+ if (link_mode == BTC_WLINK_2G_SCC) {
+ if (role_ver == 7) {
+ rinfo_v7->link_mode = BTC_WLINK_2G_STA;
+ rinfo_v7->connect_cnt--;
+ } else if (role_ver == 8) {
+ rinfo_v8->link_mode = BTC_WLINK_2G_STA;
+ rinfo_v8->connect_cnt--;
+ }
+ } else if (link_mode == BTC_WLINK_2G_GO ||
+ link_mode == BTC_WLINK_2G_AP) {
+ if (role_ver == 7) {
+ rinfo_v7->link_mode = BTC_WLINK_NOLINK;
+ rinfo_v7->connect_cnt--;
+ } else if (role_ver == 8) {
+ rinfo_v8->link_mode = BTC_WLINK_NOLINK;
+ rinfo_v8->connect_cnt--;
+ }
}
}
/* Identify 2-Role type */
- if (wl_rinfo->connect_cnt >= 2 &&
- (wl_rinfo->link_mode == BTC_WLINK_2G_SCC ||
- wl_rinfo->link_mode == BTC_WLINK_2G_MCC ||
- wl_rinfo->link_mode == BTC_WLINK_25G_MCC ||
- wl_rinfo->link_mode == BTC_WLINK_5G)) {
+ if (connect_cnt >= 2 &&
+ (link_mode == BTC_WLINK_2G_SCC ||
+ link_mode == BTC_WLINK_2G_MCC ||
+ link_mode == BTC_WLINK_25G_MCC ||
+ link_mode == BTC_WLINK_5G)) {
if ((wl_role & BIT(RTW89_WIFI_ROLE_P2P_GO)) ||
(wl_role & BIT(RTW89_WIFI_ROLE_AP)))
type = noa ? BTC_WLMROLE_STA_GO_NOA : BTC_WLMROLE_STA_GO;
@@ -6150,8 +6302,167 @@ static void _update_role_link_mode(struct rtw89_dev *rtwdev,
dur = noa;
}
- wl_rinfo->mrole_type = type;
- wl_rinfo->mrole_noa_duration = dur;
+ if (role_ver == 7) {
+ rinfo_v7->mrole_type = type;
+ rinfo_v7->mrole_noa_duration = dur;
+ } else if (role_ver == 8) {
+ rinfo_v8->mrole_type = type;
+ rinfo_v8->mrole_noa_duration = dur;
+ }
+}
+
+static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid)
+{
+ struct rtw89_btc_chdef cid_ch[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER];
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo = &wl->role_info_v7;
+ struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
+ struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info;
+ struct rtw89_btc_wl_active_role_v7 *act_role = NULL;
+ u8 i, mode, cnt = 0, cnt_2g = 0, cnt_5g = 0, phy_now = RTW89_PHY_MAX, phy_dbcc;
+ bool b2g = false, b5g = false, client_joined = false, client_inc_2g = false;
+ u8 client_cnt_last[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 cid_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 cid_phy[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 mac = RTW89_MAC_0, dbcc_2g_phy = RTW89_PHY_0;
+ u32 noa_duration = 0;
+
+ memset(wl_rinfo, 0, sizeof(*wl_rinfo));
+
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
+ if (!wl_linfo[i].active || wl_linfo[i].phy >= RTW89_PHY_MAX)
+ continue;
+
+ act_role = &wl_rinfo->active_role[i];
+ act_role->role = wl_linfo[i].role;
+
+ /* check whether this role is connected */
+ if (wl_linfo[i].connected == MLME_NO_LINK) {
+ act_role->connected = 0;
+ continue;
+ } else if (wl_linfo[i].connected == MLME_LINKING) {
+ continue;
+ }
+
+ cnt++;
+ act_role->connected = 1;
+ act_role->pid = wl_linfo[i].pid;
+ act_role->phy = wl_linfo[i].phy;
+ act_role->band = wl_linfo[i].band;
+ act_role->ch = wl_linfo[i].ch;
+ act_role->bw = wl_linfo[i].bw;
+ act_role->noa = wl_linfo[i].noa;
+ act_role->noa_dur = wl_linfo[i].noa_duration;
+ cid_ch[cnt - 1] = wl_linfo[i].chdef;
+ cid_phy[cnt - 1] = wl_linfo[i].phy;
+ cid_role[cnt - 1] = wl_linfo[i].role;
+ wl_rinfo->role_map |= BIT(wl_linfo[i].role);
+
+ if (rid == i)
+ phy_now = act_role->phy;
+
+ if (wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_GO ||
+ wl_linfo[i].role == RTW89_WIFI_ROLE_AP) {
+ if (wl_linfo[i].client_cnt > 1)
+ client_joined = true;
+ if (client_cnt_last[i] < wl_linfo[i].client_cnt &&
+ wl_linfo[i].chdef.band == RTW89_BAND_2G)
+ client_inc_2g = true;
+ act_role->client_cnt = wl_linfo[i].client_cnt;
+ } else {
+ act_role->client_cnt = 0;
+ }
+
+ if (act_role->noa && act_role->noa_dur > 0)
+ noa_duration = act_role->noa_dur;
+
+ if (rtwdev->dbcc_en) {
+ phy_dbcc = wl_linfo[i].phy;
+ wl_dinfo->role[phy_dbcc] |= BIT(wl_linfo[i].role);
+ wl_dinfo->op_band[phy_dbcc] = wl_linfo[i].chdef.band;
+ }
+
+ if (wl_linfo[i].chdef.band != RTW89_BAND_2G) {
+ cnt_5g++;
+ b5g = true;
+ } else {
+ if (((wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_GO ||
+ wl_linfo[i].role == RTW89_WIFI_ROLE_AP) &&
+ client_joined) ||
+ wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_CLIENT)
+ wl_rinfo->p2p_2g = 1;
+
+ if ((wl_linfo[i].mode & BIT(BTC_WL_MODE_11B)) ||
+ (wl_linfo[i].mode & BIT(BTC_WL_MODE_11G)))
+ wl->bg_mode = 1;
+ else if (wl_linfo[i].mode & BIT(BTC_WL_MODE_HE))
+ wl->he_mode = true;
+
+ cnt_2g++;
+ b2g = true;
+ }
+
+ if (act_role->band == RTW89_BAND_5G && act_role->ch >= 100)
+ wl->is_5g_hi_channel = 1;
+ else
+ wl->is_5g_hi_channel = 0;
+ }
+
+ wl_rinfo->connect_cnt = cnt;
+ wl->client_cnt_inc_2g = client_inc_2g;
+
+ if (cnt == 0) {
+ mode = BTC_WLINK_NOLINK;
+ wl_rinfo->role_map = BIT(RTW89_WIFI_ROLE_NONE);
+ } else if (!b2g && b5g) {
+ mode = BTC_WLINK_5G;
+ } else if (wl_rinfo->role_map & BIT(RTW89_WIFI_ROLE_NAN)) {
+ mode = BTC_WLINK_2G_NAN;
+ } else if (cnt > BTC_TDMA_WLROLE_MAX) {
+ mode = BTC_WLINK_OTHER;
+ } else if (rtwdev->dbcc_en) {
+ mode = _chk_dbcc(rtwdev, cid_ch, cid_phy, cid_role, &dbcc_2g_phy);
+
+ /* correct 2G-located PHY band for gnt ctrl */
+ if (dbcc_2g_phy < RTW89_PHY_MAX)
+ wl_dinfo->op_band[dbcc_2g_phy] = RTW89_BAND_2G;
+ } else if (b2g && b5g && cnt == 2) {
+ mode = BTC_WLINK_25G_MCC;
+ } else if (!b5g && cnt == 2) { /* cnt_connect = 2 */
+ if (_chk_role_ch_group(&cid_ch[0], &cid_ch[cnt - 1]))
+ mode = BTC_WLINK_2G_SCC;
+ else
+ mode = BTC_WLINK_2G_MCC;
+ } else if (!b5g && cnt == 1) { /* cnt_connect = 1 */
+ mode = _get_role_link_mode(cid_role[0]);
+ } else {
+ mode = BTC_WLINK_NOLINK;
+ }
+
+ wl_rinfo->link_mode = mode;
+ _update_role_link_mode(rtwdev, client_joined, noa_duration);
+
+ /* TODO: handle DBCC-related events */
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC] wl_info phy_now=%d\n", phy_now);
+
+ if (wl_rinfo->dbcc_en != rtwdev->dbcc_en) {
+ wl_rinfo->dbcc_chg = 1;
+ wl_rinfo->dbcc_en = rtwdev->dbcc_en;
+ btc->cx.cnt_wl[BTC_WCNT_DBCC_CHG]++;
+ }
+
+ if (rtwdev->dbcc_en) {
+ wl_rinfo->dbcc_2g_phy = dbcc_2g_phy;
+
+ if (dbcc_2g_phy == RTW89_PHY_1)
+ mac = RTW89_MAC_1;
+
+ _update_dbcc_band(rtwdev, RTW89_PHY_0);
+ _update_dbcc_band(rtwdev, RTW89_PHY_1);
+ }
+ _wl_req_mac(rtwdev, mac);
+ _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
}
static void _update_wl_info_v8(struct rtw89_dev *rtwdev, u8 role_id, u8 rlink_id,
@@ -6496,6 +6807,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
u8 mode, igno_bt, always_freerun;
@@ -6511,6 +6823,8 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
@@ -6657,7 +6971,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
_action_wl_2g_scc(rtwdev);
else if (ver->fwlrole == 1)
_action_wl_2g_scc_v1(rtwdev);
- else if (ver->fwlrole == 2)
+ else if (ver->fwlrole == 2 || ver->fwlrole == 7)
_action_wl_2g_scc_v2(rtwdev);
else if (ver->fwlrole == 8)
_action_wl_2g_scc_v8(rtwdev);
@@ -7169,7 +7483,7 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
struct rtw89_sta *rtwsta, enum btc_role_state state)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
struct rtw89_btc *btc = &rtwdev->btc;
@@ -7250,6 +7564,9 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
} else if (ver->fwlrole == 2) {
*wlinfo = r;
_update_wl_info_v2(rtwdev);
+ } else if (ver->fwlrole == 7) {
+ *wlinfo = r;
+ _update_wl_info_v7(rtwdev, r.pid);
} else if (ver->fwlrole == 8) {
wlinfo = &wl->rlink_info[r.pid][rlink_id];
*wlinfo = r;
@@ -7856,6 +8173,7 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
u8 mode;
@@ -7870,6 +8188,8 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
@@ -10288,6 +10608,108 @@ static void _show_summary_v105(struct rtw89_dev *rtwdev, struct seq_file *m)
cnt[BTC_NCNT_CUSTOMERIZE]);
}
+static void _show_summary_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
+ struct rtw89_btc_fbtc_rpt_ctrl_v7 *prptctrl = NULL;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ struct rtw89_btc_cx *cx = &rtwdev->btc.cx;
+ struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ u32 *cnt = rtwdev->btc.dm.cnt_notify;
+ u32 cnt_sum = 0;
+ u8 i;
+
+ if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
+ return;
+
+ seq_printf(m, "%s", "\n\r========== [Statistics] ==========");
+
+ pcinfo = &pfwinfo->rpt_ctrl.cinfo;
+ if (pcinfo->valid && wl->status.map.lps != BTC_LPS_RF_OFF &&
+ !wl->status.map.rf_off) {
+ prptctrl = &pfwinfo->rpt_ctrl.finfo.v7;
+
+ seq_printf(m,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d),"
+ "c2h_cnt=%d(fw_send:%d, len:%d, max:%d), ",
+ "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ le16_to_cpu(prptctrl->rpt_info.cnt_h2c), pfwinfo->cnt_c2h,
+ le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
+ le16_to_cpu(prptctrl->rpt_info.len_c2h),
+ rtwdev->btc.ver->info_buf);
+
+ seq_printf(m, "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le16_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en));
+
+ if (dm->error.map.wl_fw_hang)
+ seq_puts(m, " (WL FW Hang!!)");
+
+ seq_printf(m, "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]", le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ seq_printf(m, "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ seq_printf(m,
+ "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]",
+ "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT],
+ wl->rfk_info.proc_time);
+
+ seq_printf(m, ", bt_rfk[req:%d]",
+ le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
+
+ seq_printf(m, ", AOAC[RF_on:%d/RF_off:%d]",
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
+ } else {
+ seq_printf(m,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)",
+ "[summary]",
+ pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ pfwinfo->cnt_c2h,
+ wl->status.map.lps, wl->status.map.rf_off);
+ }
+
+ for (i = 0; i < BTC_NCNT_NUM; i++)
+ cnt_sum += dm->cnt_notify[i];
+
+ seq_printf(m,
+ "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]",
+ cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ seq_printf(m,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
+
+ seq_printf(m,
+ "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ",
+ "[notify_cnt]",
+ cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
+ cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
+
+ seq_printf(m, "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE],
+ rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW],
+ cnt[BTC_NCNT_COUNTRYCODE]);
+}
+
static void _show_summary_v8(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
@@ -10440,6 +10862,8 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_summary_v5(rtwdev, m);
else if (ver->fcxbtcrpt == 105)
_show_summary_v105(rtwdev, m);
+ else if (ver->fcxbtcrpt == 7)
+ _show_summary_v7(rtwdev, m);
else if (ver->fcxbtcrpt == 8)
_show_summary_v8(rtwdev, m);
}
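
In the fcxbtcrpt == 7 report handling above, BTC_BCNT_POLUT_DIFF is derived from a free-running firmware total: the previously cached total is subtracted only when the new value is larger, so after a firmware counter reset the raw total is taken instead of a wrapped-around difference. A standalone sketch of that delta bookkeeping under illustrative names (it mirrors the quirk that an unchanged total yields the raw value, as the original code does):

#include <stdint.h>
#include <stdio.h>

static uint16_t polut_now;	/* last total reported by firmware */
static uint16_t polut_diff;	/* change over the last report interval */

static void update_polut(uint16_t fw_total)
{
	uint16_t val = fw_total;

	/* diff only while the counter keeps rising; after a firmware
	 * reset, fall back to the raw total
	 */
	if (val > polut_now)
		val -= polut_now;

	polut_diff = val;
	polut_now = fw_total;
}

int main(void)
{
	update_polut(10);	/* diff = 10 */
	update_polut(25);	/* diff = 15 */
	update_polut(3);	/* fw reset: diff = 3 */
	printf("now=%u diff=%u\n", polut_now, polut_diff);
	return 0;
}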
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index 0e5f268616f7..de53b56632f7 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -193,6 +193,8 @@ enum btc_wa_type {
BTC_WA_5G_HI_CH_RX = BIT(0),
BTC_WA_NULL_AP = BIT(1),
BTC_WA_HFP_ZB = BIT(2), /* HFP PTA req bit4 define issue */
+ BTC_WA_HFP_LAG = BIT(3), /* 8852BT: WL break causes BT Rx lag */
+ BTC_WA_INIT_SCAN = BIT(4) /* 8852A/C/D: move init scan to WL slot as a WA */
};
enum btc_3cx_type {
@@ -289,9 +291,10 @@ void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev);
static inline u8 rtw89_btc_phymap(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
- enum rtw89_rf_path_bit paths)
+ enum rtw89_rf_path_bit paths,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 phy_map;
phy_map = FIELD_PREP(BTC_RFK_PATH_MAP, paths) |
@@ -303,9 +306,10 @@ static inline u8 rtw89_btc_phymap(struct rtw89_dev *rtwdev,
static inline u8 rtw89_btc_path_phymap(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- return rtw89_btc_phymap(rtwdev, phy_idx, BIT(path));
+ return rtw89_btc_phymap(rtwdev, phy_idx, BIT(path), chanctx_idx);
}
/* return bt req len in TU */
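
rtw89_btc_phymap() above packs the RF path bitmap, PHY index, and band into a single u8 via FIELD_PREP(), now parameterized by chanctx so RFK callers report the band of the channel they actually operate on. A freestanding sketch of the same mask-based packing, with a hand-rolled field_prep() in place of the kernel macro and an invented bit layout (it relies on the GCC/Clang __builtin_ctz to find the mask's shift):

#include <stdint.h>
#include <stdio.h>

/* illustrative layout: bits 0-3 path map, bits 4-5 PHY idx, bits 6-7 band */
#define PATH_MAP_MASK	0x0fu
#define PHY_IDX_MASK	0x30u
#define BAND_MASK	0xc0u

/* shift @val into place under @mask, like the kernel's FIELD_PREP() */
static uint8_t field_prep(uint8_t mask, uint8_t val)
{
	return (uint8_t)((val << __builtin_ctz(mask)) & mask);
}

int main(void)
{
	uint8_t phy_map = field_prep(PATH_MAP_MASK, 0x3) |	/* paths A+B */
			  field_prep(PHY_IDX_MASK, 1) |		/* PHY 1 */
			  field_prep(BAND_MASK, 2);		/* band 2 */

	printf("phy_map = 0x%02x\n", phy_map);	/* prints 0x93 */
	return 0;
}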
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 7019f7d482a8..4553810634c6 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -346,8 +346,8 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
struct rtw89_hal *hal = &rtwdev->hal;
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_chan *chan;
- enum rtw89_sub_entity_idx sub_entity_idx;
- enum rtw89_sub_entity_idx roc_idx;
+ enum rtw89_chanctx_idx chanctx_idx;
+ enum rtw89_chanctx_idx roc_idx;
enum rtw89_phy_idx phy_idx;
enum rtw89_entity_mode mode;
bool entity_active;
@@ -360,22 +360,22 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
switch (mode) {
case RTW89_ENTITY_MODE_SCC:
case RTW89_ENTITY_MODE_MCC:
- sub_entity_idx = RTW89_SUB_ENTITY_0;
+ chanctx_idx = RTW89_CHANCTX_0;
break;
case RTW89_ENTITY_MODE_MCC_PREPARE:
- sub_entity_idx = RTW89_SUB_ENTITY_1;
+ chanctx_idx = RTW89_CHANCTX_1;
break;
default:
WARN(1, "Invalid ent mode: %d\n", mode);
return;
}
- roc_idx = atomic_read(&hal->roc_entity_idx);
- if (roc_idx != RTW89_SUB_ENTITY_IDLE)
- sub_entity_idx = roc_idx;
+ roc_idx = atomic_read(&hal->roc_chanctx_idx);
+ if (roc_idx != RTW89_CHANCTX_IDLE)
+ chanctx_idx = roc_idx;
phy_idx = RTW89_PHY_0;
- chan = rtw89_chan_get(rtwdev, sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, chanctx_idx);
chip->ops->set_txpwr(rtwdev, chan, phy_idx);
}
@@ -385,8 +385,8 @@ int rtw89_set_channel(struct rtw89_dev *rtwdev)
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_chan_rcd *chan_rcd;
const struct rtw89_chan *chan;
- enum rtw89_sub_entity_idx sub_entity_idx;
- enum rtw89_sub_entity_idx roc_idx;
+ enum rtw89_chanctx_idx chanctx_idx;
+ enum rtw89_chanctx_idx roc_idx;
enum rtw89_mac_idx mac_idx;
enum rtw89_phy_idx phy_idx;
struct rtw89_channel_help_params bak;
@@ -399,25 +399,25 @@ int rtw89_set_channel(struct rtw89_dev *rtwdev)
switch (mode) {
case RTW89_ENTITY_MODE_SCC:
case RTW89_ENTITY_MODE_MCC:
- sub_entity_idx = RTW89_SUB_ENTITY_0;
+ chanctx_idx = RTW89_CHANCTX_0;
break;
case RTW89_ENTITY_MODE_MCC_PREPARE:
- sub_entity_idx = RTW89_SUB_ENTITY_1;
+ chanctx_idx = RTW89_CHANCTX_1;
break;
default:
WARN(1, "Invalid ent mode: %d\n", mode);
return -EINVAL;
}
- roc_idx = atomic_read(&hal->roc_entity_idx);
- if (roc_idx != RTW89_SUB_ENTITY_IDLE)
- sub_entity_idx = roc_idx;
+ roc_idx = atomic_read(&hal->roc_chanctx_idx);
+ if (roc_idx != RTW89_CHANCTX_IDLE)
+ chanctx_idx = roc_idx;
mac_idx = RTW89_MAC_0;
phy_idx = RTW89_PHY_0;
- chan = rtw89_chan_get(rtwdev, sub_entity_idx);
- chan_rcd = rtw89_chan_rcd_get(rtwdev, sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, chanctx_idx);
+ chan_rcd = rtw89_chan_rcd_get(rtwdev, chanctx_idx);
rtw89_chip_set_channel_prepare(rtwdev, &bak, chan, mac_idx, phy_idx);
@@ -429,7 +429,7 @@ int rtw89_set_channel(struct rtw89_dev *rtwdev)
if (!entity_active || chan_rcd->band_changed) {
rtw89_btc_ntfy_switch_band(rtwdev, phy_idx, chan->band_type);
- rtw89_chip_rfk_band_changed(rtwdev, phy_idx);
+ rtw89_chip_rfk_band_changed(rtwdev, phy_idx, chan);
}
rtw89_set_entity_state(rtwdev, true);
@@ -441,7 +441,7 @@ void rtw89_get_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
{
const struct cfg80211_chan_def *chandef;
- chandef = rtw89_chandef_get(rtwdev, rtwvif->sub_entity_idx);
+ chandef = rtw89_chandef_get(rtwdev, rtwvif->chanctx_idx);
rtw89_get_channel_params(chandef, chan);
}
@@ -602,15 +602,28 @@ static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev,
return rtwsta->mac_id;
}
+static void rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+
+ desc_info->hdr_llc_len = ieee80211_hdrlen(fc);
+ desc_info->hdr_llc_len >>= 1; /* in unit of 2 bytes */
+}
+
static void
rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct ieee80211_vif *vif = tx_req->vif;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
+ struct sk_buff *skb = tx_req->skb;
u8 qsel, ch_dma;
qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : RTW89_TX_QSEL_B0_MGMT;
@@ -629,6 +642,11 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
desc_info->dis_data_fb = true;
desc_info->data_rate = rtw89_core_get_mgmt_rate(rtwdev, tx_req, chan);
+ if (chip->hw_mgmt_tx_encrypt && IEEE80211_SKB_CB(skb)->control.hw_key) {
+ rtw89_core_tx_update_sec_key(rtwdev, tx_req);
+ rtw89_core_tx_update_llc_hdr(rtwdev, desc_info, skb);
+ }
+
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
"tx mgmt frame with rate 0x%x on channel %d (band %d, bw %d)\n",
desc_info->data_rate, chan->channel, chan->band_type,
@@ -769,7 +787,7 @@ static u16 rtw89_core_get_data_rate(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta = tx_req->sta;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
- enum rtw89_sub_entity_idx idx = rtwvif->sub_entity_idx;
+ enum rtw89_chanctx_idx idx = rtwvif->chanctx_idx;
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx);
u16 lowest_rate;
@@ -862,17 +880,6 @@ rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev,
return PACKET_MAX;
}
-static void rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev,
- struct rtw89_tx_desc_info *desc_info,
- struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (void *)skb->data;
- __le16 fc = hdr->frame_control;
-
- desc_info->hdr_llc_len = ieee80211_hdrlen(fc);
- desc_info->hdr_llc_len >>= 1; /* in unit of 2 bytes */
-}
-
static void
rtw89_core_tx_wake(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
@@ -1449,16 +1456,20 @@ static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev,
return -EINVAL;
}
- /* For WiFi 7 chips, RXWD.mac_id of PPDU status is not set by hardware,
- * so update mac_id by rxinfo_user[].mac_id.
- */
- for (i = 0; i < usr_num && chip_gen == RTW89_CHIP_BE; i++) {
+ for (i = 0; i < usr_num; i++) {
user = &rxinfo->user[i];
if (!le32_get_bits(user->w0, RTW89_RXINFO_USER_MAC_ID_VALID))
continue;
-
- phy_ppdu->mac_id =
- le32_get_bits(user->w0, RTW89_RXINFO_USER_MACID);
+ /* For WiFi 7 chips, RXWD.mac_id of PPDU status is not set
+ * by hardware, so update mac_id by rxinfo_user[].mac_id.
+ */
+ if (chip_gen == RTW89_CHIP_BE)
+ phy_ppdu->mac_id =
+ le32_get_bits(user->w0, RTW89_RXINFO_USER_MACID);
+ phy_ppdu->has_data =
+ le32_get_bits(user->w0, RTW89_RXINFO_USER_DATA);
+ phy_ppdu->has_bcn =
+ le32_get_bits(user->w0, RTW89_RXINFO_USER_BCN);
break;
}
@@ -1480,6 +1491,26 @@ static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev,
return 0;
}
+static u8 rtw89_get_data_rate_nss(struct rtw89_dev *rtwdev, u16 data_rate)
+{
+ u8 data_rate_mode;
+
+ data_rate_mode = rtw89_get_data_rate_mode(rtwdev, data_rate);
+ switch (data_rate_mode) {
+ case DATA_RATE_MODE_NON_HT:
+ return 1;
+ case DATA_RATE_MODE_HT:
+ return rtw89_get_data_ht_nss(rtwdev, data_rate) + 1;
+ case DATA_RATE_MODE_VHT:
+ case DATA_RATE_MODE_HE:
+ case DATA_RATE_MODE_EHT:
+ return rtw89_get_data_nss(rtwdev, data_rate) + 1;
+ default:
+ rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode);
+ return 0;
+ }
+}
+
static void rtw89_core_rx_process_phy_ppdu_iter(void *data,
struct ieee80211_sta *sta)
{
@@ -1509,10 +1540,14 @@ static void rtw89_core_rx_process_phy_ppdu_iter(void *data,
ewma_rssi_add(&rtwsta->rssi[i], phy_ppdu->rssi[i]);
}
- if (phy_ppdu->ofdm.has) {
+ if (phy_ppdu->ofdm.has && (phy_ppdu->has_data || phy_ppdu->has_bcn)) {
ewma_snr_add(&rtwsta->avg_snr, phy_ppdu->ofdm.avg_snr);
- ewma_evm_add(&rtwsta->evm_min[evm_pos], phy_ppdu->ofdm.evm_min);
- ewma_evm_add(&rtwsta->evm_max[evm_pos], phy_ppdu->ofdm.evm_max);
+ if (rtw89_get_data_rate_nss(rtwdev, phy_ppdu->rate) == 1) {
+ ewma_evm_add(&rtwsta->evm_1ss, phy_ppdu->ofdm.evm_min);
+ } else {
+ ewma_evm_add(&rtwsta->evm_min[evm_pos], phy_ppdu->ofdm.evm_min);
+ ewma_evm_add(&rtwsta->evm_max[evm_pos], phy_ppdu->ofdm.evm_max);
+ }
}
}
@@ -1548,11 +1583,27 @@ static u16 rtw89_core_get_phy_status_ie_len(struct rtw89_dev *rtwdev,
return ie_len;
}
+static void rtw89_core_parse_phy_status_ie01_v2(struct rtw89_dev *rtwdev,
+ const struct rtw89_phy_sts_iehdr *iehdr,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_phy_sts_ie01_v2 *ie;
+ u8 *rpl_fd = phy_ppdu->rpl_fd;
+
+ ie = (const struct rtw89_phy_sts_ie01_v2 *)iehdr;
+ rpl_fd[RF_PATH_A] = le32_get_bits(ie->w8, RTW89_PHY_STS_IE01_V2_W8_RPL_FD_A);
+ rpl_fd[RF_PATH_B] = le32_get_bits(ie->w8, RTW89_PHY_STS_IE01_V2_W8_RPL_FD_B);
+ rpl_fd[RF_PATH_C] = le32_get_bits(ie->w9, RTW89_PHY_STS_IE01_V2_W9_RPL_FD_C);
+ rpl_fd[RF_PATH_D] = le32_get_bits(ie->w9, RTW89_PHY_STS_IE01_V2_W9_RPL_FD_D);
+
+ phy_ppdu->bw_idx = le32_get_bits(ie->w5, RTW89_PHY_STS_IE01_V2_W5_BW_IDX);
+}
+
static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev,
const struct rtw89_phy_sts_iehdr *iehdr,
struct rtw89_rx_phy_ppdu *phy_ppdu)
{
- const struct rtw89_phy_sts_ie0 *ie = (const struct rtw89_phy_sts_ie0 *)iehdr;
+ const struct rtw89_phy_sts_ie01 *ie = (const struct rtw89_phy_sts_ie01 *)iehdr;
s16 cfo;
u32 t;
@@ -1563,12 +1614,17 @@ static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev,
phy_ppdu->stbc = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_STBC);
}
+ if (!phy_ppdu->hdr_2_en)
+ phy_ppdu->rx_path_en =
+ le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_RX_PATH_EN);
+
if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6)
return;
if (!phy_ppdu->to_self)
return;
+ phy_ppdu->rpl_avg = le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_RSSI_AVG_FD);
phy_ppdu->ofdm.avg_snr = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_AVG_SNR);
phy_ppdu->ofdm.evm_max = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MAX);
phy_ppdu->ofdm.evm_min = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MIN);
@@ -1584,6 +1640,39 @@ static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev,
}
rtw89_phy_cfo_parse(rtwdev, cfo, phy_ppdu);
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
+ rtw89_core_parse_phy_status_ie01_v2(rtwdev, iehdr, phy_ppdu);
+}
+
+static void rtw89_core_parse_phy_status_ie00(struct rtw89_dev *rtwdev,
+ const struct rtw89_phy_sts_iehdr *iehdr,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_phy_sts_ie00 *ie = (const struct rtw89_phy_sts_ie00 *)iehdr;
+ u16 tmp_rpl;
+
+ tmp_rpl = le32_get_bits(ie->w0, RTW89_PHY_STS_IE00_W0_RPL);
+ phy_ppdu->rpl_avg = tmp_rpl >> 1;
+}
+
+static void rtw89_core_parse_phy_status_ie00_v2(struct rtw89_dev *rtwdev,
+ const struct rtw89_phy_sts_iehdr *iehdr,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_phy_sts_ie00_v2 *ie;
+ u8 *rpl_path = phy_ppdu->rpl_path;
+ u16 tmp_rpl[RF_PATH_MAX];
+ u8 i;
+
+ ie = (const struct rtw89_phy_sts_ie00_v2 *)iehdr;
+ tmp_rpl[RF_PATH_A] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_A);
+ tmp_rpl[RF_PATH_B] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_B);
+ tmp_rpl[RF_PATH_C] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_C);
+ tmp_rpl[RF_PATH_D] = le32_get_bits(ie->w5, RTW89_PHY_STS_IE00_V2_W5_RPL_TD_D);
+
+ for (i = 0; i < RF_PATH_MAX; i++)
+ rpl_path[i] = tmp_rpl[i] >> 1;
}
static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev,
@@ -1595,6 +1684,11 @@ static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev,
ie = le32_get_bits(iehdr->w0, RTW89_PHY_STS_IEHDR_TYPE);
switch (ie) {
+ case RTW89_PHYSTS_IE00_CMN_CCK:
+ rtw89_core_parse_phy_status_ie00(rtwdev, iehdr, phy_ppdu);
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
+ rtw89_core_parse_phy_status_ie00_v2(rtwdev, iehdr, phy_ppdu);
+ break;
case RTW89_PHYSTS_IE01_CMN_OFDM:
rtw89_core_parse_phy_status_ie01(rtwdev, iehdr, phy_ppdu);
break;
@@ -1605,6 +1699,13 @@ static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev,
return 0;
}
+static void rtw89_core_update_phy_ppdu_hdr_v2(struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_phy_sts_hdr_v2 *hdr = phy_ppdu->buf + PHY_STS_HDR_LEN;
+
+ phy_ppdu->rx_path_en = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_V2_W0_PATH_EN);
+}
+
static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu)
{
const struct rtw89_phy_sts_hdr *hdr = phy_ppdu->buf;
@@ -1616,6 +1717,10 @@ static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu)
rssi[RF_PATH_B] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_B);
rssi[RF_PATH_C] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_C);
rssi[RF_PATH_D] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_D);
+
+ phy_ppdu->hdr_2_en = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_HDR_2_EN);
+ if (phy_ppdu->hdr_2_en)
+ rtw89_core_update_phy_ppdu_hdr_v2(phy_ppdu);
}
static int rtw89_core_rx_process_phy_ppdu(struct rtw89_dev *rtwdev,
@@ -1668,6 +1773,7 @@ static int rtw89_core_rx_parse_phy_sts(struct rtw89_dev *rtwdev,
}
}
+ rtw89_chip_convert_rpl_to_rssi(rtwdev, phy_ppdu);
rtw89_phy_antdiv_parse(rtwdev, phy_ppdu);
return 0;
@@ -1959,7 +2065,7 @@ static void rtw89_correct_cck_chan(struct rtw89_dev *rtwdev,
struct ieee80211_rx_status *status)
{
const struct rtw89_chan_rcd *rcd =
- rtw89_chan_rcd_get(rtwdev, RTW89_SUB_ENTITY_0);
+ rtw89_chan_rcd_get(rtwdev, RTW89_CHANCTX_0);
u16 chan = rcd->prev_primary_channel;
u8 band = rtw89_hw_to_nl80211_band(rcd->prev_band_type);
@@ -2363,7 +2469,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
struct ieee80211_rx_status *rx_status)
{
const struct cfg80211_chan_def *chandef =
- rtw89_chandef_get(rtwdev, RTW89_SUB_ENTITY_0);
+ rtw89_chandef_get(rtwdev, RTW89_CHANCTX_0);
u16 data_rate;
u8 data_rate_mode;
bool eht = false;
@@ -2856,7 +2962,7 @@ static void rtw89_core_sta_pending_tx_iter(void *data,
struct sk_buff *skb, *tmp;
int qsel, ret;
- if (rtwvif->sub_entity_idx != rtwvif_target->sub_entity_idx)
+ if (rtwvif->chanctx_idx != rtwvif_target->chanctx_idx)
return;
if (skb_queue_len(&rtwsta->roc_queue) == 0)
@@ -2950,11 +3056,11 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
"roc send null-1 failed: %d\n", ret);
rtw89_for_each_rtwvif(rtwdev, tmp)
- if (tmp->sub_entity_idx == rtwvif->sub_entity_idx)
+ if (tmp->chanctx_idx == rtwvif->chanctx_idx)
tmp->offchan = true;
cfg80211_chandef_create(&roc_chan, &roc->chan, NL80211_CHAN_NO_HT);
- rtw89_config_roc_chandef(rtwdev, rtwvif->sub_entity_idx, &roc_chan);
+ rtw89_config_roc_chandef(rtwdev, rtwvif->chanctx_idx, &roc_chan);
rtw89_set_channel(rtwdev);
rtw89_write32_clr(rtwdev,
rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
@@ -2987,7 +3093,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtwdev->hal.rx_fltr);
roc->state = RTW89_ROC_IDLE;
- rtw89_config_roc_chandef(rtwdev, rtwvif->sub_entity_idx, NULL);
+ rtw89_config_roc_chandef(rtwdev, rtwvif->chanctx_idx, NULL);
rtw89_chanctx_proceed(rtwdev);
ret = rtw89_core_send_nullfunc(rtwdev, rtwvif, true, false);
if (ret)
@@ -2995,7 +3101,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
"roc send null-0 failed: %d\n", ret);
rtw89_for_each_rtwvif(rtwdev, tmp)
- if (tmp->sub_entity_idx == rtwvif->sub_entity_idx)
+ if (tmp->chanctx_idx == rtwvif->chanctx_idx)
tmp->offchan = false;
rtw89_core_handle_sta_pending_tx(rtwdev, rtwvif);
@@ -3189,6 +3295,7 @@ static void rtw89_track_work(struct work_struct *work)
rtw89_phy_edcca_track(rtwdev);
rtw89_tas_track(rtwdev);
rtw89_chanctx_track(rtwdev);
+ rtw89_core_rfkill_poll(rtwdev, false);
if (rtwdev->lps_enabled && !rtwdev->btc.lps)
rtw89_enter_lps_track(rtwdev);
@@ -3367,6 +3474,7 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
ewma_rssi_init(&rtwsta->avg_rssi);
ewma_snr_init(&rtwsta->avg_snr);
+ ewma_evm_init(&rtwsta->evm_1ss);
for (i = 0; i < ant_num; i++) {
ewma_rssi_init(&rtwsta->rssi[i]);
ewma_evm_init(&rtwsta->evm_min[i]);
@@ -3384,7 +3492,7 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_CONN_START);
- rtw89_chip_rfk_channel(rtwdev);
+ rtw89_chip_rfk_channel(rtwdev, rtwvif);
} else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
rtwsta->mac_id = rtw89_acquire_mac_id(rtwdev);
if (rtwsta->mac_id == RTW89_MAX_MAC_ID_NUM)
@@ -3491,7 +3599,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
struct rtw89_bssid_cam_entry *bssid_cam = rtw89_get_bssid_cam_of(rtwvif, rtwsta);
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
int ret;
if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
@@ -4226,9 +4334,14 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
u8 rtw89_acquire_mac_id(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 mac_id_num = chip->support_macid_num;
+ u8 mac_id_num;
u8 mac_id;
+ if (rtwdev->support_mlo)
+ mac_id_num = chip->support_macid_num / chip->support_link_num;
+ else
+ mac_id_num = chip->support_macid_num;
+
mac_id = find_first_zero_bit(rtwdev->mac_id_map, mac_id_num);
if (mac_id == mac_id_num)
return RTW89_MAX_MAC_ID_NUM;
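Worked numbers for the MLO budget above, assuming the intent is that every link of an MLO station consumes its own mac_id (hypothetical values, not from the patch):

/* e.g. support_macid_num = 32, support_link_num = 2:
 *   MLO:     32 / 2 = 16 allocatable mac_ids (one id held back per link)
 *   non-MLO: 32 allocatable mac_ids
 */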
@@ -4278,6 +4391,8 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_init_wait(&rtwdev->mcc.wait);
rtw89_init_wait(&rtwdev->mac.fw_ofld_wait);
+ rtw89_init_wait(&rtwdev->wow.wait);
+ rtw89_init_wait(&rtwdev->mac.ps_wait);
INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work);
INIT_WORK(&rtwdev->ips_work, rtw89_ips_work);
@@ -4333,7 +4448,7 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
const u8 *mac_addr, bool hw_scan)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
rtwdev->scanning = true;
rtw89_leave_lps(rtwdev);
@@ -4342,7 +4457,7 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
ether_addr_copy(rtwvif->mac_addr, mac_addr);
rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, chan->band_type);
- rtw89_chip_rfk_scan(rtwdev, true);
+ rtw89_chip_rfk_scan(rtwdev, rtwvif, true);
rtw89_hci_recalc_int_mit(rtwdev);
rtw89_phy_config_edcca(rtwdev, true);
@@ -4360,7 +4475,7 @@ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
ether_addr_copy(rtwvif->mac_addr, vif->addr);
rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
- rtw89_chip_rfk_scan(rtwdev, false);
+ rtw89_chip_rfk_scan(rtwdev, rtwvif, false);
rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0);
rtw89_phy_config_edcca(rtwdev, false);
@@ -4470,6 +4585,70 @@ static int rtw89_chip_board_info_setup(struct rtw89_dev *rtwdev)
return 0;
}
+static bool rtw89_chip_has_rfkill(struct rtw89_dev *rtwdev)
+{
+ return !!rtwdev->chip->rfkill_init;
+}
+
+static void rtw89_core_rfkill_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_rfkill_regs *regs = rtwdev->chip->rfkill_init;
+
+ rtw89_write16_mask(rtwdev, regs->pinmux.addr,
+ regs->pinmux.mask, regs->pinmux.data);
+ rtw89_write16_mask(rtwdev, regs->mode.addr,
+ regs->mode.mask, regs->mode.data);
+}
+
+static bool rtw89_core_rfkill_get(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_reg_def *reg = &rtwdev->chip->rfkill_get;
+
+ return !rtw89_read8_mask(rtwdev, reg->addr, reg->mask);
+}
+
+static void rtw89_rfkill_polling_init(struct rtw89_dev *rtwdev)
+{
+ if (!rtw89_chip_has_rfkill(rtwdev))
+ return;
+
+ rtw89_core_rfkill_init(rtwdev);
+ rtw89_core_rfkill_poll(rtwdev, true);
+ wiphy_rfkill_start_polling(rtwdev->hw->wiphy);
+}
+
+static void rtw89_rfkill_polling_deinit(struct rtw89_dev *rtwdev)
+{
+ if (!rtw89_chip_has_rfkill(rtwdev))
+ return;
+
+ wiphy_rfkill_stop_polling(rtwdev->hw->wiphy);
+}
+
+void rtw89_core_rfkill_poll(struct rtw89_dev *rtwdev, bool force)
+{
+ bool prev, blocked;
+
+ if (!rtw89_chip_has_rfkill(rtwdev))
+ return;
+
+ prev = test_bit(RTW89_FLAG_HW_RFKILL_STATE, rtwdev->flags);
+ blocked = rtw89_core_rfkill_get(rtwdev);
+
+ if (!force && prev == blocked)
+ return;
+
+ rtw89_info(rtwdev, "rfkill hardware state changed to %s\n",
+ blocked ? "disabled" : "enabled");
+
+ if (blocked)
+ set_bit(RTW89_FLAG_HW_RFKILL_STATE, rtwdev->flags);
+ else
+ clear_bit(RTW89_FLAG_HW_RFKILL_STATE, rtwdev->flags);
+
+ wiphy_rfkill_set_hw_state(rtwdev->hw->wiphy, blocked);
+}
+
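wiphy_rfkill_start_polling()/wiphy_rfkill_set_hw_state() are the stock cfg80211 entry points used here; a sketch of the mac80211 glue that presumably completes the loop elsewhere in this series (the op name is an assumption):

static void rtw89_ops_rfkill_poll(struct ieee80211_hw *hw)
{
	struct rtw89_dev *rtwdev = hw->priv;

	mutex_lock(&rtwdev->mutex);
	/* edge-triggered: logs and reports only when the switch flips */
	rtw89_core_rfkill_poll(rtwdev, false);
	mutex_unlock(&rtwdev->mutex);
}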
int rtw89_chip_info_setup(struct rtw89_dev *rtwdev)
{
int ret;
@@ -4580,6 +4759,9 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
if (chip->chip_gen == RTW89_CHIP_BE)
hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
+ if (rtwdev->support_mlo)
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
@@ -4587,6 +4769,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
#ifdef CONFIG_PM
hw->wiphy->wowlan = rtwdev->chip->wowlan_stub;
+ hw->wiphy->max_sched_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
#endif
hw->wiphy->tid_config_support.vif |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
@@ -4625,6 +4808,8 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
goto err_unregister_hw;
}
+ rtw89_rfkill_polling_init(rtwdev);
+
return 0;
err_unregister_hw:
@@ -4639,6 +4824,7 @@ static void rtw89_core_unregister_hw(struct rtw89_dev *rtwdev)
{
struct ieee80211_hw *hw = rtwdev->hw;
+ rtw89_rfkill_polling_deinit(rtwdev);
ieee80211_unregister_hw(hw);
rtw89_core_clr_supported_band(rtwdev);
}
@@ -4662,6 +4848,8 @@ EXPORT_SYMBOL(rtw89_core_register);
void rtw89_core_unregister(struct rtw89_dev *rtwdev)
{
rtw89_core_unregister_hw(rtwdev);
+
+ rtw89_debugfs_deinit(rtwdev);
}
EXPORT_SYMBOL(rtw89_core_unregister);
@@ -4676,6 +4864,7 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
struct ieee80211_ops *ops;
u32 driver_data_size;
int fw_format = -1;
+ bool support_mlo;
bool no_chanctx;
firmware = rtw89_early_fw_feature_recognize(device, chip, &early_fw, &fw_format);
@@ -4704,6 +4893,14 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
if (!hw)
goto err;
+ /* TODO: Once the driver's MLO architecture is complete, determine
+ * whether to support MLO according to the following conditions
+ * (see the sketch after this block):
+ * 1. running with chanctx_ops
+ * 2. chip->support_link_num != 0
+ * 3. FW feature supports AP_LINK_PS
+ */
+ support_mlo = false;
+
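A hedged sketch of how the TODO above might eventually read; the helper name and the AP_LINK_PS feature-flag spelling are assumptions, not driver API:

static bool rtw89_can_support_mlo(struct rtw89_dev *rtwdev, bool no_chanctx)
{
	/* hypothetical helper mirroring conditions 1..3 of the TODO */
	return !no_chanctx &&					/* 1. chanctx_ops  */
	       rtwdev->chip->support_link_num != 0 &&		/* 2. link count   */
	       RTW89_CHK_FW_FEATURE(AP_LINK_PS, &rtwdev->fw);	/* 3. assumed flag */
}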
hw->wiphy->iface_combinations = rtw89_iface_combs;
if (no_chanctx || chip->support_chanctx_num == 1)
@@ -4718,9 +4915,12 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
rtwdev->chip = chip;
rtwdev->fw.req.firmware = firmware;
rtwdev->fw.fw_format = fw_format;
+ rtwdev->support_mlo = support_mlo;
- rtw89_debug(rtwdev, RTW89_DBG_FW, "probe driver %s chanctx\n",
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN, "probe driver %s chanctx\n",
no_chanctx ? "without" : "with");
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN, "probe driver %s MLO cap\n",
+ support_mlo ? "with" : "without");
return rtwdev;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 11fa003a9788..4ed9034fdb46 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -21,6 +21,7 @@ struct rtw89_efuse_block_cfg;
struct rtw89_h2c_rf_tssi;
struct rtw89_fw_txpwr_track_cfg;
struct rtw89_phy_rfk_log_fmt;
+struct rtw89_debugfs;
extern const struct ieee80211_ops rtw89_ops;
@@ -796,16 +797,24 @@ struct rtw89_rx_phy_ppdu {
u8 chan_idx;
u8 ie;
u16 rate;
+ u8 rpl_avg;
+ u8 rpl_path[RF_PATH_MAX];
+ u8 rpl_fd[RF_PATH_MAX];
+ u8 bw_idx;
+ u8 rx_path_en;
struct {
bool has;
u8 avg_snr;
u8 evm_max;
u8 evm_min;
} ofdm;
+ bool has_data;
+ bool has_bcn;
bool ldpc;
bool stbc;
bool to_self;
bool valid;
+ bool hdr_2_en;
};
enum rtw89_mac_idx {
@@ -820,12 +829,12 @@ enum rtw89_phy_idx {
RTW89_PHY_MAX
};
-enum rtw89_sub_entity_idx {
- RTW89_SUB_ENTITY_0 = 0,
- RTW89_SUB_ENTITY_1 = 1,
+enum rtw89_chanctx_idx {
+ RTW89_CHANCTX_0 = 0,
+ RTW89_CHANCTX_1 = 1,
- NUM_OF_RTW89_SUB_ENTITY,
- RTW89_SUB_ENTITY_IDLE = NUM_OF_RTW89_SUB_ENTITY,
+ NUM_OF_RTW89_CHANCTX,
+ RTW89_CHANCTX_IDLE = NUM_OF_RTW89_CHANCTX,
};
enum rtw89_rf_path {
@@ -925,10 +934,12 @@ enum rtw89_sc_offset {
RTW89_SC_40_LOWER = 10,
};
+/* only managed-mode (mgd) features may be added to this enum */
enum rtw89_wow_flags {
RTW89_WOW_FLAG_EN_MAGIC_PKT,
RTW89_WOW_FLAG_EN_REKEY_PKT,
RTW89_WOW_FLAG_EN_DISCONNECT,
+ RTW89_WOW_FLAG_EN_PATTERN,
RTW89_WOW_FLAG_NUM,
};
@@ -1585,6 +1596,23 @@ struct rtw89_btc_wl_active_role_v2 {
u32 noa_duration; /* ms */
};
+struct rtw89_btc_wl_active_role_v7 {
+ u8 connected;
+ u8 pid;
+ u8 phy;
+ u8 noa;
+
+ u8 band;
+ u8 client_ps;
+ u8 bw;
+ u8 role;
+
+ u8 ch;
+ u8 noa_dur;
+ u8 client_cnt;
+ u8 rsvd2;
+} __packed;
+
struct rtw89_btc_wl_role_info_bpos {
u16 none: 1;
u16 station: 1;
@@ -1666,6 +1694,22 @@ struct rtw89_btc_wl_rlink { /* H2C info, struct size must be n*4 bytes */
} __packed;
#define RTW89_BE_BTC_WL_MAX_ROLE_NUMBER 6
+struct rtw89_btc_wl_role_info_v7 { /* struct size must be n*4 bytes */
+ u8 connect_cnt;
+ u8 link_mode;
+ u8 link_mode_chg;
+ u8 p2p_2g;
+
+ struct rtw89_btc_wl_active_role_v7 active_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER];
+
+ u32 role_map;
+ u32 mrole_type; /* btc_wl_mrole_type */
+ u32 mrole_noa_duration; /* ms */
+ u32 dbcc_en;
+ u32 dbcc_chg;
+ u32 dbcc_2g_phy; /* which PHY operates on 2 GHz: HW_PHY_0 or HW_PHY_1 */
+} __packed;
+
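The "struct size must be n*4 bytes" rule above could be enforced at build time; a sketch using the kernel's static_assert() from <linux/build_bug.h> (not part of this patch):

static_assert(sizeof(struct rtw89_btc_wl_role_info_v7) % 4 == 0,
	      "H2C role info v7 must be a multiple of 4 bytes");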
struct rtw89_btc_wl_role_info_v8 { /* H2C info, struct size must be n*4 bytes */
u8 connect_cnt;
u8 link_mode;
@@ -1829,6 +1873,7 @@ struct rtw89_btc_wl_info {
struct rtw89_btc_wl_role_info role_info;
struct rtw89_btc_wl_role_info_v1 role_info_v1;
struct rtw89_btc_wl_role_info_v2 role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 role_info_v7;
struct rtw89_btc_wl_role_info_v8 role_info_v8;
struct rtw89_btc_wl_scan_info scan_info;
struct rtw89_btc_wl_dbcc_info dbcc_info;
@@ -1846,8 +1891,10 @@ struct rtw89_btc_wl_info {
bool is_5g_hi_channel;
bool pta_reg_mac_chg;
bool bg_mode;
+ bool he_mode;
bool scbd_change;
bool fw_ver_mismatch;
+ bool client_cnt_inc_2g;
u32 scbd;
};
@@ -2198,6 +2245,19 @@ struct rtw89_btc_fbtc_rpt_ctrl_v105 {
struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
} __packed;
+struct rtw89_btc_fbtc_rpt_ctrl_v7 {
+ u8 fver;
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 rsvd2;
+
+ u8 gnt_val[RTW89_PHY_MAX][4];
+ __le16 bt_cnt[BTC_BCNT_STA_MAX_V105];
+
+ struct rtw89_btc_fbtc_rpt_ctrl_info_v8 rpt_info;
+ struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
+} __packed;
+
struct rtw89_btc_fbtc_rpt_ctrl_v8 {
u8 fver;
u8 rsvd0;
@@ -2216,6 +2276,7 @@ union rtw89_btc_fbtc_rpt_ctrl_ver_info {
struct rtw89_btc_fbtc_rpt_ctrl_v4 v4;
struct rtw89_btc_fbtc_rpt_ctrl_v5 v5;
struct rtw89_btc_fbtc_rpt_ctrl_v105 v105;
+ struct rtw89_btc_fbtc_rpt_ctrl_v7 v7;
struct rtw89_btc_fbtc_rpt_ctrl_v8 v8;
};
@@ -3306,6 +3367,7 @@ struct rtw89_sta {
struct ewma_rssi avg_rssi;
struct ewma_rssi rssi[RF_PATH_MAX];
struct ewma_snr avg_snr;
+ struct ewma_evm evm_1ss;
struct ewma_evm evm_min[RF_PATH_MAX];
struct ewma_evm evm_max[RF_PATH_MAX];
struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
@@ -3403,7 +3465,7 @@ struct rtw89_vif {
struct rtw89_dev *rtwdev;
struct rtw89_roc roc;
bool chanctx_assigned; /* only valid when running with chanctx_ops */
- enum rtw89_sub_entity_idx sub_entity_idx;
+ enum rtw89_chanctx_idx chanctx_idx;
enum rtw89_reg_6ghz_power reg_6ghz_power;
struct rtw89_reg_6ghz_tpe reg_6ghz_tpe;
@@ -3537,10 +3599,12 @@ struct rtw89_chip_ops {
void (*rfk_hw_init)(struct rtw89_dev *rtwdev);
void (*rfk_init)(struct rtw89_dev *rtwdev);
void (*rfk_init_late)(struct rtw89_dev *rtwdev);
- void (*rfk_channel)(struct rtw89_dev *rtwdev);
+ void (*rfk_channel)(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
void (*rfk_band_changed)(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx);
- void (*rfk_scan)(struct rtw89_dev *rtwdev, bool start);
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+ void (*rfk_scan)(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool start);
void (*rfk_track)(struct rtw89_dev *rtwdev);
void (*power_trim)(struct rtw89_dev *rtwdev);
void (*set_txpwr)(struct rtw89_dev *rtwdev,
@@ -3555,11 +3619,15 @@ struct rtw89_chip_ops {
void (*query_ppdu)(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status);
+ void (*convert_rpl_to_rssi)(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu);
void (*ctrl_nbtg_bt_tx)(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx);
void (*cfg_txrx_path)(struct rtw89_dev *rtwdev);
void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev,
s8 pw_ofst, enum rtw89_mac_idx mac_idx);
+ void (*digital_pwr_comp)(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
int (*pwr_on_func)(struct rtw89_dev *rtwdev);
int (*pwr_off_func)(struct rtw89_dev *rtwdev);
void (*query_rxdesc)(struct rtw89_dev *rtwdev,
@@ -3671,6 +3739,7 @@ struct rtw89_scan_option {
u16 slow_pd;
u16 norm_cy;
u8 opch_end;
+ u16 delay;
u64 prohib_chan;
enum rtw89_phy_idx band;
enum rtw89_scan_be_operation operation;
@@ -3909,16 +3978,22 @@ struct rtw89_txpwr_conf {
const void *data;
};
+static inline bool rtw89_txpwr_entcpy(void *entry, const void *cursor, u8 size,
+ const struct rtw89_txpwr_conf *conf)
+{
+ u8 valid_size = min(size, conf->ent_sz);
+
+ memcpy(entry, cursor, valid_size);
+ return true;
+}
+
#define rtw89_txpwr_conf_valid(conf) (!!(conf)->data)
#define rtw89_for_each_in_txpwr_conf(entry, cursor, conf) \
- for (typecheck(const void *, cursor), (cursor) = (conf)->data, \
- memcpy(&(entry), cursor, \
- min_t(u8, sizeof(entry), (conf)->ent_sz)); \
+ for (typecheck(const void *, cursor), (cursor) = (conf)->data; \
(cursor) < (conf)->data + (conf)->num_ents * (conf)->ent_sz; \
- (cursor) += (conf)->ent_sz, \
- memcpy(&(entry), cursor, \
- min_t(u8, sizeof(entry), (conf)->ent_sz)))
+ (cursor) += (conf)->ent_sz) \
+ if (rtw89_txpwr_entcpy(&(entry), cursor, sizeof(entry), conf))
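The rework above keeps the copy out of the for-clauses by running it in an always-true if; a self-contained sketch of the same pattern with generic names (nothing rtw89-specific):

static inline bool ent_copy(void *dst, const void *src,
			    size_t dst_sz, size_t ent_sz)
{
	memcpy(dst, src, min(dst_sz, ent_sz));
	return true;	/* always true: the if exists only to sequence the copy */
}

#define for_each_ent(entry, cur, base, n, ent_sz)		\
	for ((cur) = (base);					\
	     (cur) < (base) + (n) * (ent_sz);			\
	     (cur) += (ent_sz))					\
		if (ent_copy(&(entry), (cur), sizeof(entry), (ent_sz)))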
struct rtw89_txpwr_byrate_data {
struct rtw89_txpwr_conf conf;
@@ -4066,6 +4141,11 @@ struct rtw89_rrsr_cfgs {
struct rtw89_reg3_def rsc;
};
+struct rtw89_rfkill_regs {
+ struct rtw89_reg3_def pinmux;
+ struct rtw89_reg3_def mode;
+};
+
struct rtw89_dig_regs {
u32 seg0_pd_reg;
u32 pd_lower_bound_mask;
@@ -4166,6 +4246,7 @@ struct rtw89_chip_info {
u8 wde_qempty_mgq_grpsel;
u32 rf_base_addr[2];
u8 support_macid_num;
+ u8 support_link_num;
u8 support_chanctx_num;
u8 support_bands;
u16 support_bandwidths;
@@ -4174,6 +4255,7 @@ struct rtw89_chip_info {
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
bool hw_sec_hdr;
+ bool hw_mgmt_tx_encrypt;
u8 rf_path_num;
u8 tx_nss;
u8 rx_nss;
@@ -4257,6 +4339,8 @@ struct rtw89_chip_info {
const struct rtw89_rrsr_cfgs *rrsr_cfgs;
struct rtw89_reg_def bss_clr_vld;
u32 bss_clr_map_reg;
+ const struct rtw89_rfkill_regs *rfkill_init;
+ struct rtw89_reg_def rfkill_get;
u32 dma_ch_mask;
const struct rtw89_edcca_regs *edcca_regs;
const struct wiphy_wowlan_support *wowlan_stub;
@@ -4327,6 +4411,8 @@ struct rtw89_mac_info {
/* see RTW89_FW_OFLD_WAIT_COND series for wait condition */
struct rtw89_wait_info fw_ofld_wait;
+ /* see RTW89_PS_WAIT_COND series for wait condition */
+ struct rtw89_wait_info ps_wait;
};
enum rtw89_fwdl_check_type {
@@ -4356,7 +4442,9 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_NO_LPS_PG,
RTW89_FW_FEATURE_BEACON_FILTER,
RTW89_FW_FEATURE_MACID_PAUSE_SLEEP,
+ RTW89_FW_FEATURE_SCAN_OFFLOAD_BE_V0,
RTW89_FW_FEATURE_WOW_REASON_V1,
+ RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V0,
};
struct rtw89_fw_suit {
@@ -4519,7 +4607,7 @@ struct rtw89_tas_info {
};
struct rtw89_chanctx_cfg {
- enum rtw89_sub_entity_idx idx;
+ enum rtw89_chanctx_idx idx;
int ref_count;
};
@@ -4544,7 +4632,7 @@ enum rtw89_entity_mode {
RTW89_ENTITY_MODE_UNHANDLED = -ESRCH,
};
-struct rtw89_sub_entity {
+struct rtw89_chanctx {
struct cfg80211_chan_def chandef;
struct rtw89_chan chan;
struct rtw89_chan_rcd rcd;
@@ -4577,11 +4665,11 @@ struct rtw89_hal {
bool ant_diversity_fixed;
bool support_cckpd;
bool support_igi;
- atomic_t roc_entity_idx;
+ atomic_t roc_chanctx_idx;
DECLARE_BITMAP(changes, NUM_OF_RTW89_CHANCTX_CHANGES);
- DECLARE_BITMAP(entity_map, NUM_OF_RTW89_SUB_ENTITY);
- struct rtw89_sub_entity sub[NUM_OF_RTW89_SUB_ENTITY];
+ DECLARE_BITMAP(entity_map, NUM_OF_RTW89_CHANCTX);
+ struct rtw89_chanctx chanctx[NUM_OF_RTW89_CHANCTX];
struct cfg80211_chan_def roc_chandef;
bool entity_active;
@@ -4615,6 +4703,7 @@ enum rtw89_flags {
RTW89_FLAG_WOWLAN,
RTW89_FLAG_FORBIDDEN_TRACK_WROK,
RTW89_FLAG_CHANGING_INTERFACE,
+ RTW89_FLAG_HW_RFKILL_STATE,
NUM_OF_RTW89_FLAGS,
};
@@ -5293,6 +5382,13 @@ struct rtw89_wow_param {
u8 gtk_alg;
u8 ptk_keyidx;
u8 akm;
+
+ /* see RTW89_WOW_WAIT_COND series for wait condition */
+ struct rtw89_wait_info wait;
+
+ bool pno_inited;
+ struct list_head pno_pkt_list;
+ struct cfg80211_sched_scan_request *nd_config;
};
struct rtw89_mcc_limit {
@@ -5399,6 +5495,7 @@ struct rtw89_dev {
const struct ieee80211_ops *ops;
bool dbcc_en;
+ bool support_mlo;
enum rtw89_mlo_dbcc_mode mlo_dbcc_mode;
struct rtw89_hw_scan_info scan_info;
const struct rtw89_chip_info *chip;
@@ -5505,6 +5602,8 @@ struct rtw89_dev {
struct napi_struct napi;
int napi_budget_countdown;
+ struct rtw89_debugfs *debugfs;
+
/* HCI related data, keep last */
u8 priv[] __aligned(sizeof(void *));
};
@@ -6028,33 +6127,33 @@ void rtw89_chip_set_channel_done(struct rtw89_dev *rtwdev,
static inline
const struct cfg80211_chan_def *rtw89_chandef_get(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx)
+ enum rtw89_chanctx_idx idx)
{
struct rtw89_hal *hal = &rtwdev->hal;
- enum rtw89_sub_entity_idx roc_idx = atomic_read(&hal->roc_entity_idx);
+ enum rtw89_chanctx_idx roc_idx = atomic_read(&hal->roc_chanctx_idx);
if (roc_idx == idx)
return &hal->roc_chandef;
- return &hal->sub[idx].chandef;
+ return &hal->chanctx[idx].chandef;
}
static inline
const struct rtw89_chan *rtw89_chan_get(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx)
+ enum rtw89_chanctx_idx idx)
{
struct rtw89_hal *hal = &rtwdev->hal;
- return &hal->sub[idx].chan;
+ return &hal->chanctx[idx].chan;
}
static inline
const struct rtw89_chan_rcd *rtw89_chan_rcd_get(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx)
+ enum rtw89_chanctx_idx idx)
{
struct rtw89_hal *hal = &rtwdev->hal;
- return &hal->sub[idx].rcd;
+ return &hal->chanctx[idx].rcd;
}
static inline
@@ -6064,9 +6163,9 @@ const struct rtw89_chan *rtw89_scan_chan_get(struct rtw89_dev *rtwdev)
struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
if (rtwvif)
- return rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx);
+ return rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
else
- return rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ return rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
}
static inline void rtw89_chip_fem_setup(struct rtw89_dev *rtwdev)
@@ -6140,29 +6239,32 @@ static inline void rtw89_chip_rfk_init_late(struct rtw89_dev *rtwdev)
chip->ops->rfk_init_late(rtwdev);
}
-static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev)
+static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
if (chip->ops->rfk_channel)
- chip->ops->rfk_channel(rtwdev);
+ chip->ops->rfk_channel(rtwdev, rtwvif);
}
static inline void rtw89_chip_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
if (chip->ops->rfk_band_changed)
- chip->ops->rfk_band_changed(rtwdev, phy_idx);
+ chip->ops->rfk_band_changed(rtwdev, phy_idx, chan);
}
-static inline void rtw89_chip_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+static inline void rtw89_chip_rfk_scan(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool start)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
if (chip->ops->rfk_scan)
- chip->ops->rfk_scan(rtwdev, start);
+ chip->ops->rfk_scan(rtwdev, rtwvif, start);
}
static inline void rtw89_chip_rfk_track(struct rtw89_dev *rtwdev)
@@ -6219,6 +6321,15 @@ static inline void rtw89_chip_query_ppdu(struct rtw89_dev *rtwdev,
chip->ops->query_ppdu(rtwdev, phy_ppdu, status);
}
+static inline void rtw89_chip_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->convert_rpl_to_rssi)
+ chip->ops->convert_rpl_to_rssi(rtwdev, phy_ppdu);
+}
+
static inline void rtw89_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx)
{
@@ -6250,6 +6361,15 @@ void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
chip->ops->set_txpwr_ul_tb_offset(rtwdev, 0, rtwvif->mac_idx);
}
+static inline void rtw89_chip_digital_pwr_comp(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->digital_pwr_comp)
+ chip->ops->digital_pwr_comp(rtwdev, phy_idx);
+}
+
static inline void rtw89_load_txpwr_table(struct rtw89_dev *rtwdev,
const struct rtw89_txpwr_table *tbl)
{
@@ -6503,6 +6623,7 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta,
struct cfg80211_tid_config *tid_config);
+void rtw89_core_rfkill_poll(struct rtw89_dev *rtwdev, bool force);
void rtw89_check_quirks(struct rtw89_dev *rtwdev, const struct dmi_system_id *quirks);
int rtw89_core_init(struct rtw89_dev *rtwdev);
void rtw89_core_deinit(struct rtw89_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 9e1353cce9cc..29f85210f919 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -52,6 +52,27 @@ struct rtw89_debugfs_priv {
};
};
+struct rtw89_debugfs {
+ struct rtw89_debugfs_priv read_reg;
+ struct rtw89_debugfs_priv write_reg;
+ struct rtw89_debugfs_priv read_rf;
+ struct rtw89_debugfs_priv write_rf;
+ struct rtw89_debugfs_priv rf_reg_dump;
+ struct rtw89_debugfs_priv txpwr_table;
+ struct rtw89_debugfs_priv mac_reg_dump;
+ struct rtw89_debugfs_priv mac_mem_dump;
+ struct rtw89_debugfs_priv mac_dbg_port_dump;
+ struct rtw89_debugfs_priv send_h2c;
+ struct rtw89_debugfs_priv early_h2c;
+ struct rtw89_debugfs_priv fw_crash;
+ struct rtw89_debugfs_priv btc_info;
+ struct rtw89_debugfs_priv btc_manual;
+ struct rtw89_debugfs_priv fw_log_manual;
+ struct rtw89_debugfs_priv phy_info;
+ struct rtw89_debugfs_priv stations;
+ struct rtw89_debugfs_priv disable_dm;
+};
+
static const u16 rtw89_rate_info_bw_to_mhz_map[] = {
[RATE_INFO_BW_20] = 20,
[RATE_INFO_BW_40] = 40,
@@ -851,7 +872,7 @@ static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v)
mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
- chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
rtw89_debug_priv_txpwr_table_get_regd(m, rtwdev, chan);
@@ -3463,9 +3484,9 @@ static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp,
return count;
}
-static ssize_t rtw89_debug_fw_log_manual_set(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+static ssize_t rtw89_debug_priv_fw_log_manual_set(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *loff)
{
struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
@@ -3505,7 +3526,7 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
struct rtw89_hal *hal = &rtwdev->hal;
u8 ant_num = hal->ant_diversity ? 2 : rtwdev->chip->rf_path_num;
bool ant_asterisk = hal->tx_path_diversity || hal->ant_diversity;
- u8 evm_min, evm_max;
+ u8 evm_min, evm_max, evm_1ss;
u8 rssi;
u8 snr;
int i;
@@ -3574,7 +3595,8 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
}
seq_puts(m, "]\n");
- seq_puts(m, "EVM: [");
+ evm_1ss = ewma_evm_read(&rtwsta->evm_1ss);
+ seq_printf(m, "EVM: [%2u.%02u, ", evm_1ss >> 2, (evm_1ss & 0x3) * 25);
for (i = 0; i < (hal->ant_diversity ? 2 : 1); i++) {
evm_min = ewma_evm_read(&rtwsta->evm_min[i]);
evm_max = ewma_evm_read(&rtwsta->evm_max[i]);
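The %2u.%02u pair above prints the averaged EVM as unsigned Q6.2 fixed point: integer dB = v >> 2, hundredths = (v & 0x3) * 25. For instance:

u8 v = 43;	/* 0b101011 in Q6.2 */
seq_printf(m, "%2u.%02u", v >> 2, (v & 0x3) * 25);	/* -> "10.75" */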
@@ -3853,92 +3875,55 @@ rtw89_debug_priv_disable_dm_set(struct file *filp, const char __user *user_buf,
return count;
}
-static struct rtw89_debugfs_priv rtw89_debug_priv_read_reg = {
- .cb_read = rtw89_debug_priv_read_reg_get,
- .cb_write = rtw89_debug_priv_read_reg_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_write_reg = {
- .cb_write = rtw89_debug_priv_write_reg_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_read_rf = {
- .cb_read = rtw89_debug_priv_read_rf_get,
- .cb_write = rtw89_debug_priv_read_rf_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_write_rf = {
- .cb_write = rtw89_debug_priv_write_rf_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_rf_reg_dump = {
- .cb_read = rtw89_debug_priv_rf_reg_dump_get,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_txpwr_table = {
- .cb_read = rtw89_debug_priv_txpwr_table_get,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_mac_reg_dump = {
- .cb_read = rtw89_debug_priv_mac_reg_dump_get,
- .cb_write = rtw89_debug_priv_mac_reg_dump_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_mac_mem_dump = {
- .cb_read = rtw89_debug_priv_mac_mem_dump_get,
- .cb_write = rtw89_debug_priv_mac_mem_dump_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_mac_dbg_port_dump = {
- .cb_read = rtw89_debug_priv_mac_dbg_port_dump_get,
- .cb_write = rtw89_debug_priv_mac_dbg_port_dump_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_send_h2c = {
- .cb_write = rtw89_debug_priv_send_h2c_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_early_h2c = {
- .cb_read = rtw89_debug_priv_early_h2c_get,
- .cb_write = rtw89_debug_priv_early_h2c_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_fw_crash = {
- .cb_read = rtw89_debug_priv_fw_crash_get,
- .cb_write = rtw89_debug_priv_fw_crash_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_btc_info = {
- .cb_read = rtw89_debug_priv_btc_info_get,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_btc_manual = {
- .cb_write = rtw89_debug_priv_btc_manual_set,
-};
+#define rtw89_debug_priv_get(name) \
+{ \
+ .cb_read = rtw89_debug_priv_ ##name## _get, \
+}
-static struct rtw89_debugfs_priv rtw89_debug_priv_fw_log_manual = {
- .cb_write = rtw89_debug_fw_log_manual_set,
-};
+#define rtw89_debug_priv_set(name) \
+{ \
+ .cb_write = rtw89_debug_priv_ ##name## _set, \
+}
-static struct rtw89_debugfs_priv rtw89_debug_priv_phy_info = {
- .cb_read = rtw89_debug_priv_phy_info_get,
-};
+#define rtw89_debug_priv_select_and_get(name) \
+{ \
+ .cb_write = rtw89_debug_priv_ ##name## _select, \
+ .cb_read = rtw89_debug_priv_ ##name## _get, \
+}
-static struct rtw89_debugfs_priv rtw89_debug_priv_stations = {
- .cb_read = rtw89_debug_priv_stations_get,
-};
+#define rtw89_debug_priv_set_and_get(name) \
+{ \
+ .cb_write = rtw89_debug_priv_ ##name## _set, \
+ .cb_read = rtw89_debug_priv_ ##name## _get, \
+}
-static struct rtw89_debugfs_priv rtw89_debug_priv_disable_dm = {
- .cb_read = rtw89_debug_priv_disable_dm_get,
- .cb_write = rtw89_debug_priv_disable_dm_set,
+static const struct rtw89_debugfs rtw89_debugfs_templ = {
+ .read_reg = rtw89_debug_priv_select_and_get(read_reg),
+ .write_reg = rtw89_debug_priv_set(write_reg),
+ .read_rf = rtw89_debug_priv_select_and_get(read_rf),
+ .write_rf = rtw89_debug_priv_set(write_rf),
+ .rf_reg_dump = rtw89_debug_priv_get(rf_reg_dump),
+ .txpwr_table = rtw89_debug_priv_get(txpwr_table),
+ .mac_reg_dump = rtw89_debug_priv_select_and_get(mac_reg_dump),
+ .mac_mem_dump = rtw89_debug_priv_select_and_get(mac_mem_dump),
+ .mac_dbg_port_dump = rtw89_debug_priv_select_and_get(mac_dbg_port_dump),
+ .send_h2c = rtw89_debug_priv_set(send_h2c),
+ .early_h2c = rtw89_debug_priv_set_and_get(early_h2c),
+ .fw_crash = rtw89_debug_priv_set_and_get(fw_crash),
+ .btc_info = rtw89_debug_priv_get(btc_info),
+ .btc_manual = rtw89_debug_priv_set(btc_manual),
+ .fw_log_manual = rtw89_debug_priv_set(fw_log_manual),
+ .phy_info = rtw89_debug_priv_get(phy_info),
+ .stations = rtw89_debug_priv_get(stations),
+ .disable_dm = rtw89_debug_priv_set_and_get(disable_dm),
};
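For reference, the initializer macros expand by plain token pasting; the .read_reg entry above becomes:

.read_reg = {
	.cb_write = rtw89_debug_priv_read_reg_select,
	.cb_read = rtw89_debug_priv_read_reg_get,
},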
#define rtw89_debugfs_add(name, mode, fopname, parent) \
do { \
- rtw89_debug_priv_ ##name.rtwdev = rtwdev; \
- if (!debugfs_create_file(#name, mode, \
- parent, &rtw89_debug_priv_ ##name, \
- &file_ops_ ##fopname)) \
+ struct rtw89_debugfs_priv *priv = &rtwdev->debugfs->name; \
+ priv->rtwdev = rtwdev; \
+ if (IS_ERR(debugfs_create_file(#name, mode, parent, priv, \
+ &file_ops_ ##fopname))) \
pr_debug("Unable to initialize debugfs:%s\n", #name); \
} while (0)
@@ -3949,13 +3934,9 @@ static struct rtw89_debugfs_priv rtw89_debug_priv_disable_dm = {
#define rtw89_debugfs_add_r(name) \
rtw89_debugfs_add(name, S_IFREG | 0444, single_r, debugfs_topdir)
-void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
+static
+void rtw89_debugfs_add_sec0(struct rtw89_dev *rtwdev, struct dentry *debugfs_topdir)
{
- struct dentry *debugfs_topdir;
-
- debugfs_topdir = debugfs_create_dir("rtw89",
- rtwdev->hw->wiphy->debugfsdir);
-
rtw89_debugfs_add_rw(read_reg);
rtw89_debugfs_add_w(write_reg);
rtw89_debugfs_add_rw(read_rf);
@@ -3965,6 +3946,11 @@ void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
rtw89_debugfs_add_rw(mac_reg_dump);
rtw89_debugfs_add_rw(mac_mem_dump);
rtw89_debugfs_add_rw(mac_dbg_port_dump);
+}
+
+static
+void rtw89_debugfs_add_sec1(struct rtw89_dev *rtwdev, struct dentry *debugfs_topdir)
+{
rtw89_debugfs_add_w(send_h2c);
rtw89_debugfs_add_rw(early_h2c);
rtw89_debugfs_add_rw(fw_crash);
@@ -3975,6 +3961,27 @@ void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
rtw89_debugfs_add_r(stations);
rtw89_debugfs_add_rw(disable_dm);
}
+
+void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
+{
+ struct dentry *debugfs_topdir;
+
+ rtwdev->debugfs = kmemdup(&rtw89_debugfs_templ,
+ sizeof(rtw89_debugfs_templ), GFP_KERNEL);
+ if (!rtwdev->debugfs)
+ return;
+
+ debugfs_topdir = debugfs_create_dir("rtw89",
+ rtwdev->hw->wiphy->debugfsdir);
+
+ rtw89_debugfs_add_sec0(rtwdev, debugfs_topdir);
+ rtw89_debugfs_add_sec1(rtwdev, debugfs_topdir);
+}
+
+void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev)
+{
+ kfree(rtwdev->debugfs);
+}
#endif
#ifdef CONFIG_RTW89_DEBUGMSG
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index 800ea59873a1..fc690f7c55dc 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -49,8 +49,10 @@ enum rtw89_debug_mac_reg_sel {
#ifdef CONFIG_RTW89_DEBUGFS
void rtw89_debugfs_init(struct rtw89_dev *rtwdev);
+void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev);
#else
static inline void rtw89_debugfs_init(struct rtw89_dev *rtwdev) {}
+static inline void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev) {}
#endif
#define rtw89_info(rtwdev, a...) dev_info((rtwdev)->dev, ##a)
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index fbe08c162b93..d9b0e7ebe619 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -670,6 +670,10 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
@@ -679,8 +683,10 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -2491,7 +2497,7 @@ fail:
int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_h2c_lps_ch_info *h2c;
u32 len = sizeof(*h2c);
@@ -2810,7 +2816,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
struct sk_buff *skb;
u8 pads[RTW89_PPE_BW_NUM];
u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
@@ -2943,9 +2949,9 @@ int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
u8 pads[RTW89_PPE_BW_NUM];
@@ -3206,7 +3212,7 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
struct rtw89_h2c_bcn_upd *h2c;
struct sk_buff *skb_beacon;
@@ -3285,7 +3291,7 @@ EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);
int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
struct rtw89_h2c_bcn_upd_be *h2c;
struct sk_buff *skb_beacon;
@@ -4319,6 +4325,52 @@ fail:
return ret;
}
+int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7;
+ struct rtw89_h2c_cxrole_v7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data;
+
+ h2c->hdr.type = type;
+ h2c->hdr.ver = btc->ver->fwlrole;
+ h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
+ memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
+ h2c->_u32.role_map = cpu_to_le32(role->role_map);
+ h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type);
+ h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration);
+ h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en);
+ h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg);
+ h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -4337,6 +4389,7 @@ int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type)
h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data;
h2c->hdr.type = type;
+ h2c->hdr.ver = btc->ver->fwlrole;
h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
h2c->_u32.role_map = cpu_to_le32(role->role_map);
@@ -4417,7 +4470,7 @@ int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type)
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
- rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n");
return -ENOMEM;
}
skb_put(skb, len);
@@ -4802,16 +4855,20 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
return 0;
}
-int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
- struct rtw89_scan_option *option,
- struct rtw89_vif *rtwvif)
+#define RTW89_SCAN_DELAY_TSF_UNIT 104800
+int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option,
+ struct rtw89_vif *rtwvif,
+ bool wowlan)
{
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
+ enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE;
struct rtw89_h2c_scanofld *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
unsigned int cond;
+ u64 tsf = 0;
int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
@@ -4822,6 +4879,17 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
skb_put(skb, len);
h2c = (struct rtw89_h2c_scanofld *)skb->data;
+ if (option->delay) {
+ ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif, &tsf);
+ if (ret) {
+ rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret);
+ scan_mode = RTW89_SCAN_IMMEDIATE;
+ } else {
+ scan_mode = RTW89_SCAN_DELAY;
+ tsf += option->delay * RTW89_SCAN_DELAY_TSF_UNIT;
+ }
+ }
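/* Worked numbers (illustrative): with delay = 10 the target TSF becomes
 * tsf + 10 * 104800 = tsf + 1,048,000 us, i.e. the scan starts ~1.05 s
 * past the current port TSF; the patch does not say what one delay tick
 * is meant to represent.
 */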
+
h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) |
le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) |
le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) |
@@ -4830,9 +4898,11 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) |
le32_encode_bits(option->target_ch_mode,
RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) |
- le32_encode_bits(RTW89_SCAN_IMMEDIATE,
- RTW89_H2C_SCANOFLD_W1_START_MODE) |
- le32_encode_bits(RTW89_SCAN_ONCE, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE);
+ le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) |
+ le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE);
+
+ h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) |
+ le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD);
if (option->target_ch_mode) {
h2c->w1 |= le32_encode_bits(op->band_width,
@@ -4845,6 +4915,11 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND);
}
+ h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf),
+ RTW89_H2C_SCANOFLD_W3_TSF_HIGH);
+ h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf),
+ RTW89_H2C_SCANOFLD_W4_TSF_LOW);
+
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
H2C_FUNC_SCANOFLD, 1, 1,
@@ -4888,7 +4963,8 @@ static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev,
int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
struct rtw89_scan_option *option,
- struct rtw89_vif *rtwvif)
+ struct rtw89_vif *rtwvif,
+ bool wowlan)
{
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
@@ -4902,6 +4978,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role;
u8 opch_size = sizeof(*opch) * option->num_opch;
u8 probe_id[NUM_NL80211_BANDS];
+ u8 cfg_len = sizeof(*h2c);
unsigned int cond;
void *ptr;
int ret;
@@ -4910,7 +4987,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
rtw89_scan_get_6g_disabled_chan(rtwdev, option);
- len = sizeof(*h2c) + macc_role_size + opch_size;
+ len = cfg_len + macc_role_size + opch_size;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
@@ -4923,11 +5000,13 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));
- list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) {
- if (pkt_info->wildcard_6ghz) {
- /* Provide wildcard as template */
- probe_id[NL80211_BAND_6GHZ] = pkt_info->id;
- break;
+ if (!wowlan) {
+ list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) {
+ if (pkt_info->wildcard_6ghz) {
+ /* Provide wildcard as template */
+ probe_id[NL80211_BAND_6GHZ] = pkt_info->id;
+ break;
+ }
}
}
@@ -4958,7 +5037,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) |
le32_encode_bits(probe_id[NL80211_BAND_6GHZ],
RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) |
- le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START);
+ le32_encode_bits(option->delay, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START);
h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE);
@@ -4966,7 +5045,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW);
h2c->w7 = le32_encode_bits(option->prohib_chan >> 32,
RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH);
- if (req->no_cck) {
+ if (!wowlan && req->no_cck) {
h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE);
h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6,
RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) |
@@ -4975,10 +5054,24 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
le32_encode_bits(RTW89_HW_RATE_OFDM6,
RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ);
}
- ptr += sizeof(*h2c);
+
+ if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) {
+ cfg_len = offsetofend(typeof(*h2c), w8);
+ goto flex_member;
+ }
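/* offsetofend(T, m) is offsetof(T, m) + the member's size, so with
 * SCAN_OFFLOAD_BE_V0 firmware cfg_len shrinks to the layout that ends
 * at w8 -- 9 * 4 = 36 bytes if w0..w8 are nine __le32 words (an
 * assumption) -- and w9 is simply never emitted for old firmware.
 */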
+
+ h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0),
+ RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) |
+ le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0),
+ RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) |
+ le32_encode_bits(sizeof(*opch) / sizeof(opch->w0),
+ RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP);
+
+flex_member:
+ ptr += cfg_len;
for (i = 0; i < option->num_macc_role; i++) {
- macc_role = (struct rtw89_h2c_scanofld_be_macc_role *)&h2c->role[i];
+ macc_role = ptr;
macc_role->w0 =
le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) |
le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) |
@@ -5132,14 +5225,21 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+ struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0;
struct rtw89_fw_h2c_rfk_pre_info *h2c;
u8 tbl_sel = rfk_mcc->table_idx;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
+ u8 ver = U8_MAX;
u8 tbl, path;
u32 val32;
int ret;
+ if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
+ len = sizeof(*h2c_v0);
+ ver = 0;
+ }
+
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n");
@@ -5148,41 +5248,53 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
skb_put(skb, len);
h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;
- h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+ h2c->common.mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
- h2c->dbcc.ch[path][tbl] = cpu_to_le32(rfk_mcc->ch[tbl]);
- h2c->dbcc.band[path][tbl] = cpu_to_le32(rfk_mcc->band[tbl]);
+ h2c->common.dbcc.ch[path][tbl] =
+ cpu_to_le32(rfk_mcc->ch[tbl]);
+ h2c->common.dbcc.band[path][tbl] =
+ cpu_to_le32(rfk_mcc->band[tbl]);
}
}
for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
- h2c->tbl.cur_ch[path] = cpu_to_le32(rfk_mcc->ch[tbl_sel]);
- h2c->tbl.cur_band[path] = cpu_to_le32(rfk_mcc->band[tbl_sel]);
+ h2c->common.tbl.cur_ch[path] = cpu_to_le32(rfk_mcc->ch[tbl_sel]);
+ h2c->common.tbl.cur_band[path] = cpu_to_le32(rfk_mcc->band[tbl_sel]);
}
- h2c->phy_idx = cpu_to_le32(phy_idx);
- h2c->cur_band = cpu_to_le32(rfk_mcc->band[tbl_sel]);
- h2c->cur_bw = cpu_to_le32(rfk_mcc->bw[tbl_sel]);
- h2c->cur_center_ch = cpu_to_le32(rfk_mcc->ch[tbl_sel]);
-
- val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1);
- h2c->ktbl_sel0 = cpu_to_le32(val32);
- val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1);
- h2c->ktbl_sel1 = cpu_to_le32(val32);
- val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
- h2c->rfmod0 = cpu_to_le32(val32);
- val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
- h2c->rfmod1 = cpu_to_le32(val32);
+ h2c->common.phy_idx = cpu_to_le32(phy_idx);
- if (rtw89_is_mlo_1_1(rtwdev))
- h2c->mlo_1_1 = cpu_to_le32(1);
+ if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */
+ h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data;
+
+ h2c_v0->cur_band = cpu_to_le32(rfk_mcc->band[tbl_sel]);
+ h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->bw[tbl_sel]);
+ h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->ch[tbl_sel]);
+
+ val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1);
+ h2c_v0->ktbl_sel0 = cpu_to_le32(val32);
+ val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1);
+ h2c_v0->ktbl_sel1 = cpu_to_le32(val32);
+ val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ h2c_v0->rfmod0 = cpu_to_le32(val32);
+ val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
+ h2c_v0->rfmod1 = cpu_to_le32(val32);
+
+ if (rtw89_is_mlo_1_1(rtwdev))
+ h2c_v0->mlo_1_1 = cpu_to_le32(1);
+
+ h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type);
- h2c->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type);
+ goto done;
+ }
+ if (rtw89_is_mlo_1_1(rtwdev))
+ h2c->mlo_1_1 = cpu_to_le32(1);
+done:
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
H2C_FUNC_RFK_PRE_NOTIFY, 0, 0,
@@ -5202,10 +5314,8 @@ fail:
}
int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- enum rtw89_tssi_mode tssi_mode)
+ const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- RTW89_SUB_ENTITY_0);
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_h2c_rf_tssi *h2c;
u32 len = sizeof(*h2c);
@@ -5249,7 +5359,8 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
struct rtw89_h2c_rf_iqk *h2c;
u32 len = sizeof(*h2c);
@@ -5284,10 +5395,9 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- RTW89_SUB_ENTITY_0);
struct rtw89_h2c_rf_dpk *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
@@ -5327,10 +5437,9 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- RTW89_SUB_ENTITY_0);
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_h2c_rf_txgapk *h2c;
u32 len = sizeof(*h2c);
@@ -5371,7 +5480,8 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
struct rtw89_h2c_rf_dack *h2c;
u32 len = sizeof(*h2c);
@@ -5407,10 +5517,9 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- RTW89_SUB_ENTITY_0);
struct rtw89_h2c_rf_rxdck *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
@@ -5926,6 +6035,56 @@ out:
return ret;
}
+static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev,
+ int chan_type, int ssid_num,
+ struct rtw89_mac_chinfo *ch_info)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_pktofld_info *info;
+ u8 probe_count = 0;
+
+ ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
+ ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
+ ch_info->bw = RTW89_SCAN_WIDTH;
+ ch_info->tx_pkt = true;
+ ch_info->cfg_tx_pwr = false;
+ ch_info->tx_pwr_idx = 0;
+ ch_info->tx_null = false;
+ ch_info->pause_data = false;
+ ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
+
+ if (ssid_num) {
+ list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) {
+ if (info->channel_6ghz &&
+ ch_info->pri_ch != info->channel_6ghz)
+ continue;
+ else if (info->channel_6ghz && probe_count != 0)
+ ch_info->period += RTW89_CHANNEL_TIME_6G;
+
+ if (info->wildcard_6ghz)
+ continue;
+
+ ch_info->pkt_id[probe_count++] = info->id;
+ if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
+ break;
+ }
+ ch_info->num_pkt = probe_count;
+ }
+
+ switch (chan_type) {
+ case RTW89_CHAN_DFS:
+ if (ch_info->ch_band != RTW89_BAND_6G)
+ ch_info->period = max_t(u8, ch_info->period,
+ RTW89_DFS_CHAN_TIME);
+ ch_info->dwell_time = RTW89_DWELL_TIME;
+ break;
+ case RTW89_CHAN_ACTIVE:
+ break;
+ default:
+ rtw89_err(rtwdev, "Channel type out of bound\n");
+ }
+}
+
static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
int ssid_num,
struct rtw89_mac_chinfo *ch_info)
@@ -6004,6 +6163,45 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
}
}
+static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
+ int ssid_num,
+ struct rtw89_mac_chinfo_be *ch_info)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_pktofld_info *info;
+ u8 probe_count = 0, i;
+
+ ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
+ ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
+ ch_info->bw = RTW89_SCAN_WIDTH;
+ ch_info->tx_null = false;
+ ch_info->pause_data = false;
+ ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
+
+ if (ssid_num) {
+ list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) {
+ ch_info->pkt_id[probe_count++] = info->id;
+ if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
+ break;
+ }
+ }
+
+ for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
+ ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;
+
+ switch (chan_type) {
+ case RTW89_CHAN_DFS:
+ ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
+ ch_info->dwell_time = RTW89_DWELL_TIME;
+ break;
+ case RTW89_CHAN_ACTIVE:
+ break;
+ default:
+ rtw89_warn(rtwdev, "Channel type out of bound\n");
+ break;
+ }
+}
+
static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
int ssid_num,
struct rtw89_mac_chinfo_be *ch_info)
@@ -6066,8 +6264,58 @@ static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
}
}
-int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif, bool connected)
+int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
+ struct rtw89_mac_chinfo *ch_info, *tmp;
+ struct ieee80211_channel *channel;
+ struct list_head chan_list;
+ int list_len;
+ enum rtw89_chan_type type;
+ int ret = 0;
+ u32 idx;
+
+ INIT_LIST_HEAD(&chan_list);
+ for (idx = 0, list_len = 0;
+ idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx++, list_len++) {
+ channel = nd_config->channels[idx];
+ ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
+ if (!ch_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ch_info->period = RTW89_CHANNEL_TIME;
+ ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
+ ch_info->central_ch = channel->hw_value;
+ ch_info->pri_ch = channel->hw_value;
+ ch_info->is_psc = cfg80211_channel_is_psc(channel);
+
+ if (channel->flags &
+ (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
+ type = RTW89_CHAN_DFS;
+ else
+ type = RTW89_CHAN_ACTIVE;
+
+ rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info);
+ list_add_tail(&ch_info->list, &chan_list);
+ }
+ ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
+
+out:
+ list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+
+ return ret;
+}
+
+int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected)
{
struct cfg80211_scan_request *req = rtwvif->scan_req;
struct rtw89_mac_chinfo *ch_info, *tmp;
@@ -6143,6 +6391,58 @@ out:
return ret;
}
+int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
+ struct rtw89_mac_chinfo_be *ch_info, *tmp;
+ struct ieee80211_channel *channel;
+ struct list_head chan_list;
+ enum rtw89_chan_type type;
+ int list_len, ret;
+ u32 idx;
+
+ INIT_LIST_HEAD(&chan_list);
+
+ for (idx = 0, list_len = 0;
+ idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx++, list_len++) {
+ channel = nd_config->channels[idx];
+ ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
+ if (!ch_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ch_info->period = RTW89_CHANNEL_TIME;
+ ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
+ ch_info->central_ch = channel->hw_value;
+ ch_info->pri_ch = channel->hw_value;
+ ch_info->is_psc = cfg80211_channel_is_psc(channel);
+
+ if (channel->flags &
+ (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
+ type = RTW89_CHAN_DFS;
+ else
+ type = RTW89_CHAN_ACTIVE;
+
+ rtw89_pno_scan_add_chan_be(rtwdev, type,
+ nd_config->n_match_sets, ch_info);
+ list_add_tail(&ch_info->list, &chan_list);
+ }
+
+ ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list);
+
+out:
+ list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+
+ return ret;
+}
+
int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool connected)
{
@@ -6352,7 +6652,7 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID;
}
- ret = mac->scan_offload(rtwdev, &opt, rtwvif);
+ ret = mac->scan_offload(rtwdev, &opt, rtwvif, false);
out:
return ret;
}
@@ -6602,6 +6902,57 @@ fail:
return ret;
}
+int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool enable)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
+ struct rtw89_h2c_cfg_nlo *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret, i;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for nlo\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cfg_nlo *)skb->data;
+
+ h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) |
+ le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) |
+ le32_encode_bits(rtwvif->mac_id, RTW89_H2C_NLO_W0_MACID);
+
+ if (enable) {
+ h2c->nlo_cnt = nd_config->n_match_sets;
+ for (i = 0; i < nd_config->n_match_sets; i++) {
+ h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len;
+ memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid_len);
+ }
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_WOW,
+ H2C_FUNC_NLO, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+ return ret;
+}
+
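The copy loop above writes up to n_match_sets SSIDs into arrays sized RTW89_MAX_SUPPORT_NL_NUM; the driver relies on cfg80211 capping n_match_sets at the wiphy's advertised maximum (an assumption here, that cap is outside this patch). A standalone sketch that clamps explicitly, so the fixed firmware arrays can never overflow regardless of the caller:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_NL_NUM	16	/* RTW89_MAX_SUPPORT_NL_NUM */
#define MAX_SSID_LEN	32	/* IEEE80211_MAX_SSID_LEN */

struct match_set {
	uint8_t ssid[MAX_SSID_LEN];
	uint8_t ssid_len;
};

struct nlo_cmd {
	uint8_t nlo_cnt;
	uint8_t ssid_len[MAX_NL_NUM];
	uint8_t ssid[MAX_NL_NUM][MAX_SSID_LEN];
};

static void fill_nlo(struct nlo_cmd *cmd, const struct match_set *sets, int n)
{
	int i;

	if (n > MAX_NL_NUM)	/* defensive clamp, implicit in the driver */
		n = MAX_NL_NUM;
	cmd->nlo_cnt = n;
	for (i = 0; i < n; i++) {
		cmd->ssid_len[i] = sets[i].ssid_len;
		memcpy(cmd->ssid[i], sets[i].ssid, sets[i].ssid_len);
	}
}

int main(void)
{
	struct match_set set = { .ssid = "homenet", .ssid_len = 7 };
	struct nlo_cmd cmd = {0};

	fill_nlo(&cmd, &set, 1);
	printf("%d %.*s\n", cmd.nlo_cnt, cmd.ssid_len[0],
	       (const char *)cmd.ssid[0]);
	return 0;
}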
int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool enable)
{
@@ -6816,20 +7167,46 @@ hdr:
goto fail;
}
return 0;
-
fail:
dev_kfree_skb_any(skb);
return ret;
}
+int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool enable)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait;
+ struct rtw89_h2c_fwips *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for fw ips\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_fwips *)skb->data;
+
+ h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_FW_IPS_W0_MACID) |
+ le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_PS,
+ H2C_FUNC_IPS_CFG, 0, 1,
+ len);
+
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG);
+}
+
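rtw89_fw_h2c_fwips() is fire-and-wait: it queues the H2C and parks on the new ps_wait until the done-ack handler (the H2C_CL_MAC_PS case added in mac.c below) calls rtw89_complete_cond() with RTW89_PS_WAIT_COND_IPS_CFG. A minimal pthread sketch of that wait/complete pairing; wait_info here and the function names are invented for illustration, and the kernel version additionally carries a timeout and per-condition data:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct wait_info {
	pthread_mutex_t lock;
	pthread_cond_t cv;
	unsigned int cond;	/* which condition fired */
	int err;
	bool done;
};

static void complete_cond(struct wait_info *w, unsigned int cond, int err)
{
	pthread_mutex_lock(&w->lock);
	w->cond = cond;
	w->err = err;
	w->done = true;
	pthread_cond_signal(&w->cv);
	pthread_mutex_unlock(&w->lock);
}

static int tx_and_wait(struct wait_info *w, unsigned int cond)
{
	/* ...the h2c would be queued to the firmware here... */
	pthread_mutex_lock(&w->lock);
	while (!w->done || w->cond != cond)
		pthread_cond_wait(&w->cv, &w->lock);
	pthread_mutex_unlock(&w->lock);
	return w->err;
}

static void *c2h_done_ack(void *arg)
{
	/* firmware acks: same role as rtw89_mac_c2h_done_ack() */
	complete_cond(arg, 3 /* RTW89_PS_WAIT_COND_IPS_CFG */, 0);
	return NULL;
}

int main(void)
{
	struct wait_info w = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cv = PTHREAD_COND_INITIALIZER,
	};
	pthread_t thr;

	pthread_create(&thr, NULL, c2h_done_ack, &w);
	printf("ips cfg err=%d\n", tx_and_wait(&w, 3));
	pthread_join(&thr, NULL);
	return 0;
}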
int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
{
- struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_wait_info *wait = &rtwdev->wow.wait;
struct rtw89_h2c_wow_aoac *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
- unsigned int cond;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
@@ -6848,8 +7225,7 @@ int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
H2C_FUNC_AOAC_REPORT_REQ, 1, 0,
len);
- cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ);
- return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC);
}
/* Return < 0 if a failure happens while waiting for the condition.
@@ -7341,7 +7717,7 @@ int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}
-int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx)
+int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx)
{
struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
struct rtw89_h2c_mrc_del *h2c;
@@ -7358,7 +7734,8 @@ int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx)
skb_put(skb, len);
h2c = (struct rtw89_h2c_mrc_del *)skb->data;
- h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX);
+ h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) |
+ le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index c3b4324c621c..ad47e77d740b 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -1898,6 +1898,24 @@ struct rtw89_h2c_wow_global {
#define RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO GENMASK(23, 16)
#define RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO GENMASK(31, 24)
+#define RTW89_MAX_SUPPORT_NL_NUM 16
+struct rtw89_h2c_cfg_nlo {
+ __le32 w0;
+ u8 nlo_cnt;
+ u8 rsvd[3];
+ __le32 patterncheck;
+ __le32 rsvd1;
+ __le32 rsvd2;
+ u8 ssid_len[RTW89_MAX_SUPPORT_NL_NUM];
+ u8 chiper[RTW89_MAX_SUPPORT_NL_NUM];
+ u8 rsvd3[24];
+ u8 ssid[RTW89_MAX_SUPPORT_NL_NUM][IEEE80211_MAX_SSID_LEN];
+} __packed;
+
+#define RTW89_H2C_NLO_W0_ENABLE BIT(0)
+#define RTW89_H2C_NLO_W0_IGNORE_CIPHER BIT(2)
+#define RTW89_H2C_NLO_W0_MACID GENMASK(31, 24)
+
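A worked encoding of rtw89_h2c_cfg_nlo::w0 from the three masks just defined: enable in BIT(0), ignore_cipher in BIT(2), and the MAC ID in bits 31:24. field_prep32() below is a bare-bones stand-in for le32_encode_bits(), minus the little-endian conversion the real helper also performs:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define NLO_ENABLE		BIT(0)
#define NLO_IGNORE_CIPHER	BIT(2)
#define NLO_MACID		0xff000000u	/* GENMASK(31, 24) */

static uint32_t field_prep32(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	/* macid 5, NLO enabled, cipher ignored */
	uint32_t w0 = field_prep32(NLO_ENABLE, 1) |
		      field_prep32(NLO_IGNORE_CIPHER, 1) |
		      field_prep32(NLO_MACID, 5);

	assert(w0 == 0x05000005);
	printf("w0 = 0x%08x\n", w0);
	return 0;
}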
static inline void RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(void *h2c, u32 val)
{
le32p_replace_bits((__le32 *)h2c, val, BIT(0));
@@ -2089,10 +2107,15 @@ enum rtw89_btc_cxdrvinfo {
enum rtw89_scan_mode {
RTW89_SCAN_IMMEDIATE,
+ RTW89_SCAN_DELAY,
};
enum rtw89_scan_type {
RTW89_SCAN_ONCE,
+ RTW89_SCAN_NORMAL,
+ RTW89_SCAN_NORMAL_SLOW,
+ RTW89_SCAN_SEAMLESS,
+ RTW89_SCAN_MAX,
};
static inline void RTW89_SET_FWCMD_CXHDR_TYPE(void *cmd, u8 val)
@@ -2124,6 +2147,30 @@ struct rtw89_h2c_cxctrl_v7 {
#define H2C_LEN_CXDRVHDR sizeof(struct rtw89_h2c_cxhdr)
#define H2C_LEN_CXDRVHDR_V7 sizeof(struct rtw89_h2c_cxhdr_v7)
+struct rtw89_btc_wl_role_info_v7_u8 {
+ u8 connect_cnt;
+ u8 link_mode;
+ u8 link_mode_chg;
+ u8 p2p_2g;
+
+ struct rtw89_btc_wl_active_role_v7 active_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER];
+} __packed;
+
+struct rtw89_btc_wl_role_info_v7_u32 {
+ __le32 role_map;
+ __le32 mrole_type;
+ __le32 mrole_noa_duration;
+ __le32 dbcc_en;
+ __le32 dbcc_chg;
+ __le32 dbcc_2g_phy;
+} __packed;
+
+struct rtw89_h2c_cxrole_v7 {
+ struct rtw89_h2c_cxhdr_v7 hdr;
+ struct rtw89_btc_wl_role_info_v7_u8 _u8;
+ struct rtw89_btc_wl_role_info_v7_u32 _u32;
+} __packed;
+
struct rtw89_btc_wl_role_info_v8_u8 {
u8 connect_cnt;
u8 link_mode;
@@ -2145,7 +2192,7 @@ struct rtw89_btc_wl_role_info_v8_u32 {
} __packed;
struct rtw89_h2c_cxrole_v8 {
- struct rtw89_h2c_cxhdr hdr;
+ struct rtw89_h2c_cxhdr_v7 hdr;
struct rtw89_btc_wl_role_info_v8_u8 _u8;
struct rtw89_btc_wl_role_info_v8_u32 _u32;
} __packed;
@@ -2664,6 +2711,8 @@ struct rtw89_h2c_scanofld {
#define RTW89_H2C_SCANOFLD_W1_PROBE_REQ_PKT_ID GENMASK(31, 24)
#define RTW89_H2C_SCANOFLD_W2_NORM_PD GENMASK(15, 0)
#define RTW89_H2C_SCANOFLD_W2_SLOW_PD GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_W3_TSF_HIGH GENMASK(31, 0)
+#define RTW89_H2C_SCANOFLD_W4_TSF_LOW GENMASK(31, 0)
struct rtw89_h2c_scanofld_be_macc_role {
__le32 w0;
@@ -2711,7 +2760,9 @@ struct rtw89_h2c_scanofld_be {
__le32 w6;
__le32 w7;
__le32 w8;
- struct rtw89_h2c_scanofld_be_macc_role role[];
+ __le32 w9; /* Added after SCAN_OFFLOAD_BE_V1 */
+ /* struct rtw89_h2c_scanofld_be_macc_role (flexible number) */
+ /* struct rtw89_h2c_scanofld_be_opch (flexible number) */
} __packed;
#define RTW89_H2C_SCANOFLD_BE_W0_OP GENMASK(1, 0)
@@ -2742,6 +2793,16 @@ struct rtw89_h2c_scanofld_be {
#define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ GENMASK(7, 0)
#define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ GENMASK(15, 8)
#define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP GENMASK(23, 16)
+
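The new W9 size fields let firmware built against a different layout still walk the two trailing record arrays. A sketch of sizing the command and filling W9; the per-record byte sizes and the dword units are assumptions for illustration, and only the "fixed part + macc_role[] + opch[]" layout is taken from the struct comments above:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FIXED_LEN	40u	/* w0..w9, hypothetical */
#define MACC_ROLE_LEN	8u	/* hypothetical */
#define OPCH_LEN	16u	/* hypothetical */

static uint32_t prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	unsigned int n_macc = 2, n_opch = 1;
	uint32_t len = FIXED_LEN + n_macc * MACC_ROLE_LEN + n_opch * OPCH_LEN;
	uint32_t w9 = prep(0x000000ffu, FIXED_LEN / 4) |	/* SIZE_CFG */
		      prep(0x0000ff00u, MACC_ROLE_LEN / 4) |	/* SIZE_MACC */
		      prep(0x00ff0000u, OPCH_LEN / 4);		/* SIZE_OP */

	assert(len == 72);
	printf("len=%u w9=0x%06x\n", len, w9);	/* w9 = 0x04020a */
	return 0;
}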
+struct rtw89_h2c_fwips {
+ __le32 w0;
+} __packed;
+
+#define RTW89_H2C_FW_IPS_W0_MACID GENMASK(7, 0)
+#define RTW89_H2C_FW_IPS_W0_ENABLE BIT(8)
static inline void RTW89_SET_FWCMD_P2P_MACID(void *cmd, u32 val)
{
@@ -3741,17 +3802,28 @@ enum rtw89_fw_element_id {
RTW89_FW_ELEMENT_ID_NUM,
};
-#define BITS_OF_RTW89_TXPWR_FW_ELEMENTS \
+#define BITS_OF_RTW89_TXPWR_FW_ELEMENTS_NO_6GHZ \
(BIT(RTW89_FW_ELEMENT_ID_TXPWR_BYRATE) | \
BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ) | \
BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ) | \
- BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ) | \
BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ) | \
BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ) | \
- BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ) | \
BIT(RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT) | \
BIT(RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU))
+#define BITS_OF_RTW89_TXPWR_FW_ELEMENTS \
+ (BITS_OF_RTW89_TXPWR_FW_ELEMENTS_NO_6GHZ | \
+ BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ) | \
+ BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ))
+
+#define RTW89_AX_GEN_DEF_NEEDED_FW_ELEMENTS_NO_6GHZ \
+ (BIT(RTW89_FW_ELEMENT_ID_BB_REG) | \
+ BIT(RTW89_FW_ELEMENT_ID_RADIO_A) | \
+ BIT(RTW89_FW_ELEMENT_ID_RADIO_B) | \
+ BIT(RTW89_FW_ELEMENT_ID_RF_NCTL) | \
+ BIT(RTW89_FW_ELEMENT_ID_TXPWR_TRK) | \
+ BITS_OF_RTW89_TXPWR_FW_ELEMENTS_NO_6GHZ)
+
#define RTW89_BE_GEN_DEF_NEEDED_FW_ELEMENTS (BIT(RTW89_FW_ELEMENT_ID_BBMCU0) | \
BIT(RTW89_FW_ELEMENT_ID_BB_REG) | \
BIT(RTW89_FW_ELEMENT_ID_RADIO_A) | \
@@ -3935,6 +4007,7 @@ enum rtw89_wow_h2c_func {
H2C_FUNC_WOW_GLOBAL = 0x2,
H2C_FUNC_GTK_OFLD = 0x3,
H2C_FUNC_ARP_OFLD = 0x4,
+ H2C_FUNC_NLO = 0x7,
H2C_FUNC_WAKEUP_CTRL = 0x8,
H2C_FUNC_WOW_CAM_UPD = 0xC,
H2C_FUNC_AOAC_REPORT_REQ = 0xD,
@@ -3942,13 +4015,27 @@ enum rtw89_wow_h2c_func {
NUM_OF_RTW89_WOW_H2C_FUNC,
};
-#define RTW89_WOW_WAIT_COND(func) \
- (NUM_OF_RTW89_WOW_H2C_FUNC + (func))
+#define RTW89_WOW_WAIT_COND(tag, func) \
+ ((tag) * NUM_OF_RTW89_WOW_H2C_FUNC + (func))
+
+#define RTW89_WOW_WAIT_COND_AOAC \
+ RTW89_WOW_WAIT_COND(0 /* don't care */, H2C_FUNC_AOAC_REPORT_REQ)
/* CLASS 2 - PS */
#define H2C_CL_MAC_PS 0x2
-#define H2C_FUNC_MAC_LPS_PARM 0x0
-#define H2C_FUNC_P2P_ACT 0x1
+enum rtw89_ps_h2c_func {
+ H2C_FUNC_MAC_LPS_PARM = 0x0,
+ H2C_FUNC_P2P_ACT = 0x1,
+ H2C_FUNC_IPS_CFG = 0x3,
+
+ NUM_OF_RTW89_PS_H2C_FUNC,
+};
+
+#define RTW89_PS_WAIT_COND(tag, func) \
+ ((tag) * NUM_OF_RTW89_PS_H2C_FUNC + (func))
+
+#define RTW89_PS_WAIT_COND_IPS_CFG \
+ RTW89_PS_WAIT_COND(0 /* don't care */, H2C_FUNC_IPS_CFG)
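Each H2C class now gets its own (tag, func) condition space sized by its NUM_OF_* enumerator, so conditions from different tags cannot collide within one wait_info. A compile-time check of the arithmetic; the values mirror the PS enum above, while the WOW side is analogous but its NUM_OF_* value is not fully visible in this hunk, so only the PS space is asserted:

enum ps_func {
	FUNC_LPS_PARM = 0x0,
	FUNC_P2P_ACT = 0x1,
	FUNC_IPS_CFG = 0x3,

	NUM_OF_PS_FUNC,		/* == 4: the enum continues past 0x3 */
};

#define PS_WAIT_COND(tag, func)	((tag) * NUM_OF_PS_FUNC + (func))

_Static_assert(NUM_OF_PS_FUNC == 4, "enumerator after 0x3");
_Static_assert(PS_WAIT_COND(0, FUNC_IPS_CFG) == 3, "tag 0 keeps the raw func");
_Static_assert(PS_WAIT_COND(1, FUNC_LPS_PARM) == 4,
	       "tag 1 starts past every tag-0 condition");

int main(void) { return 0; }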
/* CLASS 3 - FW download */
#define H2C_CL_MAC_FWDL 0x3
@@ -4095,7 +4182,7 @@ struct rtw89_fw_h2c_rf_get_mccch {
#define NUM_OF_RTW89_FW_RFK_PATH 2
#define NUM_OF_RTW89_FW_RFK_TBL 3
-struct rtw89_fw_h2c_rfk_pre_info {
+struct rtw89_fw_h2c_rfk_pre_info_common {
struct {
__le32 ch[NUM_OF_RTW89_FW_RFK_PATH][NUM_OF_RTW89_FW_RFK_TBL];
__le32 band[NUM_OF_RTW89_FW_RFK_PATH][NUM_OF_RTW89_FW_RFK_TBL];
@@ -4108,6 +4195,11 @@ struct rtw89_fw_h2c_rfk_pre_info {
} __packed tbl;
__le32 phy_idx;
+} __packed;
+
+struct rtw89_fw_h2c_rfk_pre_info_v0 {
+ struct rtw89_fw_h2c_rfk_pre_info_common common;
+
__le32 cur_band;
__le32 cur_bw;
__le32 cur_center_ch;
@@ -4127,6 +4219,11 @@ struct rtw89_fw_h2c_rfk_pre_info {
} __packed mlo;
} __packed;
+struct rtw89_fw_h2c_rfk_pre_info {
+ struct rtw89_fw_h2c_rfk_pre_info_common common;
+ __le32 mlo_1_1;
+} __packed;
+
struct rtw89_h2c_rf_tssi {
__le16 len;
u8 phy;
@@ -4366,6 +4463,7 @@ int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type);
@@ -4378,12 +4476,14 @@ int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num,
struct list_head *chan_list);
int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
struct list_head *chan_list);
-int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
- struct rtw89_scan_option *opt,
- struct rtw89_vif *vif);
+int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *opt,
+ struct rtw89_vif *vif,
+ bool wowlan);
int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
struct rtw89_scan_option *opt,
- struct rtw89_vif *vif);
+ struct rtw89_vif *vif,
+ bool wowlan);
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info,
u16 len, u8 page);
@@ -4391,12 +4491,17 @@ int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- enum rtw89_tssi_mode tssi_mode);
-int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+ const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode);
+int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack);
@@ -4420,6 +4525,8 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif);
+int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool enable);
struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len);
struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len);
int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
@@ -4434,10 +4541,14 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
bool enable);
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
-int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif, bool connected);
+int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected);
+int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool connected);
+int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
const struct rtw89_pkt_drop_params *params);
@@ -4450,6 +4561,8 @@ int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool enable);
int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool enable);
+int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool enable);
int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool enable);
int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev,
@@ -4488,7 +4601,7 @@ int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mrc_add_arg *arg);
int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mrc_start_arg *arg);
-int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx);
+int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx);
int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mrc_req_tsf_arg *arg,
struct rtw89_mac_mrc_tsf_rpt *rpt);
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index e2399796aeb1..c70a23a763b0 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -1625,6 +1625,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_size18 = {RTW89_WDE_PG_64, 0, 2048,},
/* 8852C PCIE SCC */
.wde_size19 = {RTW89_WDE_PG_64, 3328, 0,},
+ .wde_size23 = {RTW89_WDE_PG_64, 1022, 2,},
/* PCIE */
.ple_size0 = {RTW89_PLE_PG_128, 1520, 16,},
.ple_size0_v1 = {RTW89_PLE_PG_128, 2688, 240, 212992,},
@@ -1635,6 +1636,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_size6 = {RTW89_PLE_PG_128, 496, 16,},
/* DLFW */
.ple_size8 = {RTW89_PLE_PG_128, 64, 960,},
+ .ple_size9 = {RTW89_PLE_PG_128, 2288, 16,},
/* 8852C DLFW */
.ple_size18 = {RTW89_PLE_PG_128, 2544, 16,},
/* 8852C PCIE SCC */
@@ -1652,6 +1654,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_qt17 = {0, 0, 0, 0,},
/* 8852C PCIE SCC */
.wde_qt18 = {3228, 60, 0, 40,},
+ .wde_qt23 = {958, 48, 0, 16,},
.ple_qt0 = {320, 320, 32, 16, 13, 13, 292, 292, 64, 18, 1, 4, 0,},
.ple_qt1 = {320, 320, 32, 16, 1316, 1316, 1595, 1595, 1367, 1321, 1, 1307, 0,},
/* PCIE SCC */
@@ -1671,12 +1674,16 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_qt46 = {525, 0, 16, 20, 13, 13, 178, 0, 32, 62, 8, 16,},
/* 8852C PCIE SCC */
.ple_qt47 = {525, 0, 32, 20, 1034, 13, 1199, 0, 1053, 62, 160, 1037,},
+ .ple_qt57 = {147, 0, 16, 20, 13, 13, 178, 0, 32, 14, 8, 0,},
/* PCIE 64 */
.ple_qt58 = {147, 0, 16, 20, 157, 13, 229, 0, 172, 14, 24, 0,},
+ .ple_qt59 = {147, 0, 32, 20, 1860, 13, 2025, 0, 1879, 14, 24, 0,},
/* 8852A PCIE WOW */
.ple_qt_52a_wow = {264, 0, 32, 20, 64, 13, 1005, 0, 64, 128, 120,},
/* 8852B PCIE WOW */
.ple_qt_52b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,},
+ /* 8852BT PCIE WOW */
+ .ple_qt_52bt_wow = {147, 0, 32, 20, 1860, 13, 1929, 0, 1879, 14, 24, 0,},
/* 8851B PCIE WOW */
.ple_qt_51b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,},
.ple_rsvd_qt0 = {2, 107, 107, 6, 6, 6, 6, 0, 0, 0,},
@@ -2025,11 +2032,16 @@ int rtw89_mac_resize_ple_rx_quota(struct rtw89_dev *rtwdev, bool wow)
void rtw89_mac_hw_mgnt_sec(struct rtw89_dev *rtwdev, bool enable)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u32 msk32 = B_AX_UC_MGNT_DEC | B_AX_BMC_MGNT_DEC;
if (rtwdev->chip->chip_gen != RTW89_CHIP_AX)
return;
+ /* 8852C enables B_AX_UC_MGNT_DEC by default */
+ if (chip->chip_id == RTL8852C)
+ msk32 = B_AX_BMC_MGNT_DEC;
+
if (enable)
rtw89_write32_set(rtwdev, R_AX_SEC_ENG_CTRL, msk32);
else
@@ -2254,6 +2266,8 @@ static int sec_eng_init_ax(struct rtw89_dev *rtwdev)
/* init TX encryption */
val |= (B_AX_SEC_TX_ENC | B_AX_SEC_RX_DEC);
val |= (B_AX_MC_DEC | B_AX_BC_DEC);
+ if (chip->chip_id == RTL8852C)
+ val |= B_AX_UC_MGNT_DEC;
if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
chip->chip_id == RTL8851B)
val &= ~B_AX_TX_PARTIAL_MODE;
@@ -2728,6 +2742,7 @@ bool rtw89_mac_is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val, reg;
int ret;
@@ -2766,6 +2781,12 @@ static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU);
}
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AGG_LEN_VHT_0, mac_idx);
+ rtw89_write32_mask(rtwdev, reg,
+ B_AX_AMPDU_MAX_LEN_VHT_MASK, 0x3FF80);
+ }
+
return 0;
}
@@ -3781,7 +3802,7 @@ static int rtw89_mac_enable_cpu_ax(struct rtw89_dev *rtwdev, u8 boot_reason,
rtw89_write32(rtwdev, R_AX_WCPU_FW_CTRL, val);
- if (rtwdev->chip->chip_id == RTL8852B)
+ if (rtw89_is_rtl885xb(rtwdev))
rtw89_write32_mask(rtwdev, R_AX_SEC_CTRL,
B_AX_SEC_IDMEM_SIZE_CONFIG_MASK, 0x2);
@@ -4774,14 +4795,14 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
case RTW89_SCAN_ENTER_OP_NOTIFY:
case RTW89_SCAN_ENTER_CH_NOTIFY:
if (rtw89_is_op_chan(rtwdev, band, chan)) {
- rtw89_assign_entity_chan(rtwdev, rtwvif->sub_entity_idx,
+ rtw89_assign_entity_chan(rtwdev, rtwvif->chanctx_idx,
&rtwdev->scan_info.op_chan);
rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
ieee80211_wake_queues(rtwdev->hw);
} else {
rtw89_chan_create(&new, chan, chan, band,
RTW89_CHANNEL_WIDTH_20);
- rtw89_assign_entity_chan(rtwdev, rtwvif->sub_entity_idx,
+ rtw89_assign_entity_chan(rtwdev, rtwvif->chanctx_idx,
&new);
}
break;
@@ -4866,6 +4887,7 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le
{
/* N.B. This will run in interrupt context. */
struct rtw89_wait_info *fw_ofld_wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_wait_info *ps_wait = &rtwdev->mac.ps_wait;
const struct rtw89_c2h_done_ack *c2h =
(const struct rtw89_c2h_done_ack *)skb_c2h->data;
u8 h2c_cat = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_CAT);
@@ -4886,6 +4908,18 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le
switch (h2c_class) {
default:
return;
+ case H2C_CL_MAC_PS:
+ switch (h2c_func) {
+ default:
+ return;
+ case H2C_FUNC_IPS_CFG:
+ cond = RTW89_PS_WAIT_COND_IPS_CFG;
+ break;
+ }
+
+ data.err = !!h2c_return;
+ rtw89_complete_cond(ps_wait, cond, &data);
+ return;
case H2C_CL_MAC_FW_OFLD:
switch (h2c_func) {
default:
@@ -5144,11 +5178,10 @@ rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 le
{
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
- struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_wait_info *wait = &rtw_wow->wait;
const struct rtw89_c2h_wow_aoac_report *c2h =
(const struct rtw89_c2h_wow_aoac_report *)skb->data;
struct rtw89_completion_data data = {};
- unsigned int cond;
aoac_rpt->rpt_ver = c2h->rpt_ver;
aoac_rpt->sec_type = c2h->sec_type;
@@ -5166,8 +5199,7 @@ rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 le
aoac_rpt->igtk_ipn = le64_to_cpu(c2h->igtk_ipn);
memcpy(aoac_rpt->igtk, c2h->igtk, sizeof(aoac_rpt->igtk));
- cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ);
- rtw89_complete_cond(wait, cond, &data);
+ rtw89_complete_cond(wait, RTW89_WOW_WAIT_COND_AOAC, &data);
}
static void
@@ -6513,8 +6545,9 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.is_txq_empty = mac_is_txq_empty_ax,
- .add_chan_list = rtw89_hw_scan_add_chan_list,
- .scan_offload = rtw89_fw_h2c_scan_offload,
+ .add_chan_list = rtw89_hw_scan_add_chan_list_ax,
+ .add_chan_list_pno = rtw89_pno_scan_add_chan_list_ax,
+ .scan_offload = rtw89_fw_h2c_scan_offload_ax,
.wow_config_mac = rtw89_wow_config_mac_ax,
};
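The AX/BE split runs through the rtw89_mac_gen_def function-pointer table: generic callers such as rtw89_hw_scan_offload() invoke mac->scan_offload() and never name a generation, which is why the renamed _ax handlers and the new add_chan_list_pno hook slot in here without touching callers. A toy sketch of that dispatch with the new wowlan flag; the types are stand-ins, and the real tables live in mac.c (AX) and mac_be.c (BE):

#include <stdbool.h>
#include <stdio.h>

struct dev;

struct mac_gen_def {
	int (*scan_offload)(struct dev *d, bool wowlan);
};

struct dev {
	const struct mac_gen_def *mac;
};

static int scan_offload_ax(struct dev *d, bool wowlan)
{
	(void)d;
	printf("ax offload, wowlan=%d\n", wowlan);
	return 0;
}

static const struct mac_gen_def mac_gen_ax = {
	.scan_offload = scan_offload_ax,
};

int main(void)
{
	struct dev d = { .mac = &mac_gen_ax };

	/* generation-agnostic caller, as in rtw89_hw_scan_offload() */
	return d.mac->scan_offload(&d, false);
}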
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index d5895516b3ed..67c2a4507124 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -421,7 +421,6 @@ enum rtw89_mac_c2h_mrc_func {
enum rtw89_mac_c2h_wow_func {
RTW89_MAC_C2H_FUNC_AOAC_REPORT,
- RTW89_MAC_C2H_FUNC_READ_WOW_CAM,
NUM_OF_RTW89_MAC_C2H_FUNC_WOW,
};
@@ -885,12 +884,14 @@ struct rtw89_mac_size_set {
const struct rtw89_dle_size wde_size9;
const struct rtw89_dle_size wde_size18;
const struct rtw89_dle_size wde_size19;
+ const struct rtw89_dle_size wde_size23;
const struct rtw89_dle_size ple_size0;
const struct rtw89_dle_size ple_size0_v1;
const struct rtw89_dle_size ple_size3_v1;
const struct rtw89_dle_size ple_size4;
const struct rtw89_dle_size ple_size6;
const struct rtw89_dle_size ple_size8;
+ const struct rtw89_dle_size ple_size9;
const struct rtw89_dle_size ple_size18;
const struct rtw89_dle_size ple_size19;
const struct rtw89_wde_quota wde_qt0;
@@ -900,6 +901,7 @@ struct rtw89_mac_size_set {
const struct rtw89_wde_quota wde_qt7;
const struct rtw89_wde_quota wde_qt17;
const struct rtw89_wde_quota wde_qt18;
+ const struct rtw89_wde_quota wde_qt23;
const struct rtw89_ple_quota ple_qt0;
const struct rtw89_ple_quota ple_qt1;
const struct rtw89_ple_quota ple_qt4;
@@ -911,9 +913,12 @@ struct rtw89_mac_size_set {
const struct rtw89_ple_quota ple_qt45;
const struct rtw89_ple_quota ple_qt46;
const struct rtw89_ple_quota ple_qt47;
+ const struct rtw89_ple_quota ple_qt57;
const struct rtw89_ple_quota ple_qt58;
+ const struct rtw89_ple_quota ple_qt59;
const struct rtw89_ple_quota ple_qt_52a_wow;
const struct rtw89_ple_quota ple_qt_52b_wow;
+ const struct rtw89_ple_quota ple_qt_52bt_wow;
const struct rtw89_ple_quota ple_qt_51b_wow;
const struct rtw89_rsvd_quota ple_rsvd_qt0;
const struct rtw89_rsvd_quota ple_rsvd_qt1;
@@ -1000,9 +1005,12 @@ struct rtw89_mac_gen_def {
int (*add_chan_list)(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool connected);
+ int (*add_chan_list_pno)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int (*scan_offload)(struct rtw89_dev *rtwdev,
struct rtw89_scan_option *option,
- struct rtw89_vif *rtwvif);
+ struct rtw89_vif *rtwvif,
+ bool wowlan);
int (*wow_config_mac)(struct rtw89_dev *rtwdev, bool enable_wow);
};
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 1508693032cb..48ad0d0f76bf 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -90,7 +90,7 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
rtw89_leave_ips(rtwdev);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0,
+ rtw89_config_entity_chandef(rtwdev, RTW89_CHANCTX_0,
&hw->conf.chandef);
rtw89_set_channel(rtwdev);
}
@@ -126,7 +126,9 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
rtwvif->rtwdev = rtwdev;
rtwvif->roc.state = RTW89_ROC_IDLE;
rtwvif->offchan = false;
- list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
+ if (!rtw89_rtwvif_in_list(rtwdev, rtwvif))
+ list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
+
INIT_WORK(&rtwvif->update_beacon_work, rtw89_core_update_beacon_work);
INIT_DELAYED_WORK(&rtwvif->roc.roc_work, rtw89_roc_work);
rtw89_leave_ps_mode(rtwdev);
@@ -144,7 +146,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
rtwvif->bcn_hit_cond = 0;
rtwvif->mac_idx = RTW89_MAC_0;
rtwvif->phy_idx = RTW89_PHY_0;
- rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
+ rtwvif->chanctx_idx = RTW89_CHANCTX_0;
rtwvif->chanctx_assigned = false;
rtwvif->hit_rule = 0;
rtwvif->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
@@ -313,7 +315,7 @@ static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev,
{
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
u8 slot_time;
u8 sifs;
@@ -503,7 +505,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
- chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
if (chan->band_type == RTW89_BAND_6G) {
mutex_unlock(&rtwdev->mutex);
return -EOPNOTSUPP;
@@ -519,7 +521,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_TYPE_CHANGE);
rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
- rtw89_chip_rfk_channel(rtwdev);
+ rtw89_chip_rfk_channel(rtwdev, rtwvif);
rtw89_queue_chanctx_work(rtwdev);
mutex_unlock(&rtwdev->mutex);
@@ -783,7 +785,7 @@ static void rtw89_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta
rtwsta->use_cfg_mask = true;
rtwsta->mask = *br_data->mask;
- rtw89_phy_ra_updata_sta(br_data->rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
+ rtw89_phy_ra_update_sta(br_data->rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
}
static void rtw89_ra_mask_info_update(struct rtw89_dev *rtwdev,
@@ -925,7 +927,7 @@ static void rtw89_ops_sta_rc_update(struct ieee80211_hw *hw,
{
struct rtw89_dev *rtwdev = hw->priv;
- rtw89_phy_ra_updata_sta(rtwdev, sta, changed);
+ rtw89_phy_ra_update_sta(rtwdev, sta, changed);
}
static int rtw89_ops_add_chanctx(struct ieee80211_hw *hw,
@@ -1147,6 +1149,22 @@ static void rtw89_set_rekey_data(struct ieee80211_hw *hw,
}
#endif
+static void rtw89_ops_rfkill_poll(struct ieee80211_hw *hw)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+
+ /* the wl_disable GPIO floats when entering LPS */
+ if (test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
+ goto out;
+
+ rtw89_core_rfkill_poll(rtwdev, false);
+
+out:
+ mutex_unlock(&rtwdev->mutex);
+}
+
const struct ieee80211_ops rtw89_ops = {
.tx = rtw89_ops_tx,
.wake_tx_queue = rtw89_ops_wake_tx_queue,
@@ -1193,5 +1211,6 @@ const struct ieee80211_ops rtw89_ops = {
.set_wakeup = rtw89_ops_set_wakeup,
.set_rekey_data = rtw89_set_rekey_data,
#endif
+ .rfkill_poll = rtw89_ops_rfkill_poll,
};
EXPORT_SYMBOL(rtw89_ops);
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index f212b67771d5..31f0a5225b11 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -2599,6 +2599,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.is_txq_empty = mac_is_txq_empty_be,
.add_chan_list = rtw89_hw_scan_add_chan_list_be,
+ .add_chan_list_pno = rtw89_pno_scan_add_chan_list_be,
.scan_offload = rtw89_fw_h2c_scan_offload_be,
.wow_config_mac = rtw89_wow_config_mac_be,
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index ad11d1414874..c7165e757842 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -302,7 +302,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
struct rtw89_ra_info *ra = &rtwsta->ra;
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif);
const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi);
@@ -341,8 +341,11 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
mode |= RTW89_RA_MODE_VHT;
csi_mode = RTW89_RA_RPT_MODE_VHT;
- /* MCS9, MCS8, MCS7 */
- ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
+ /* MCS9 (non-20MHz), MCS8, MCS7 */
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ ra_mask |= get_mcs_ra_mask(mcs_map, 8, 1);
+ else
+ ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
high_rate_masks = rtw89_ra_mask_vht_rates;
if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
stbc_en = 1;
@@ -353,8 +356,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
csi_mode = RTW89_RA_RPT_MODE_HT;
ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 48) |
((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 36) |
- (sta->deflink.ht_cap.mcs.rx_mask[1] << 24) |
- (sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
+ ((u64)sta->deflink.ht_cap.mcs.rx_mask[1] << 24) |
+ ((u64)sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
high_rate_masks = rtw89_ra_mask_ht_rates;
if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
stbc_en = 1;
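The two added (u64) casts fix an integer-promotion bug: rx_mask[] is u8, so rx_mask[1] << 24 is evaluated as a 32-bit int, can land in the sign bit, and then sign-extends when widened into the u64 rate mask, setting every bit above 31. A small demonstration (strictly, shifting a 1 into the sign bit is undefined behaviour, but compilers produce 0xff000000 here, which is the point of the fix):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t rx_mask1 = 0xff;	/* MCS8-15 all supported */
	uint64_t bad  = rx_mask1 << 24;            /* int promotion */
	uint64_t good = (uint64_t)rx_mask1 << 24;  /* the patched form */

	printf("bad  = 0x%016llx\n", (unsigned long long)bad);
	/* bad  = 0xffffffffff000000: sign-extended, clobbers bits 32+ */
	printf("good = 0x%016llx\n", (unsigned long long)good);
	/* good = 0x00000000ff000000 */
	return 0;
}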
@@ -462,7 +465,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
ra->csi_mode = csi_mode;
}
-void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
+void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
u32 changed)
{
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
@@ -528,7 +531,7 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_phy_rate_pattern next_pattern = {0};
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = {
RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0),
RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0),
@@ -610,17 +613,17 @@ out:
rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n");
}
-static void rtw89_phy_ra_updata_sta_iter(void *data, struct ieee80211_sta *sta)
+static void rtw89_phy_ra_update_sta_iter(void *data, struct ieee80211_sta *sta)
{
struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;
- rtw89_phy_ra_updata_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
+ rtw89_phy_ra_update_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
}
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev)
{
ieee80211_iterate_stations_atomic(rtwdev->hw,
- rtw89_phy_ra_updata_sta_iter,
+ rtw89_phy_ra_update_sta_iter,
rtwdev);
}
@@ -3081,6 +3084,7 @@ EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait);
int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
enum rtw89_tssi_mode tssi_mode,
unsigned int ms)
{
@@ -3088,7 +3092,7 @@ int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, tssi_mode);
+ ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, chan, tssi_mode);
if (ret)
return ret;
@@ -3098,13 +3102,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_tssi_and_wait);
int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -3114,13 +3119,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_iqk_and_wait);
int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -3130,13 +3136,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_dpk_and_wait);
int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -3146,13 +3153,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_txgapk_and_wait);
int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -3162,13 +3170,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_dack_and_wait);
int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -4285,7 +4294,7 @@ void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;
if (!chip->ul_tb_waveform_ctrl)
@@ -5367,7 +5376,7 @@ static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev)
static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
{
struct rtw89_dig_info *dig = &rtwdev->dig;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
bool is_linked = rtwdev->total_sta_assoc > 0;
const u16 *fa_th_src = NULL;
@@ -5392,7 +5401,7 @@ static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th));
}
-static const u8 pd_low_th_offset = 20, dynamic_igi_min = 0x20;
+static const u8 pd_low_th_offset = 16, dynamic_igi_min = 0x20;
static const u8 igi_max_performance_mode = 0x5a;
static const u8 dynamic_pd_threshold_max;
@@ -5611,7 +5620,7 @@ static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev)
static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
bool enable)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
enum rtw89_bandwidth cbw = chan->band_width;
struct rtw89_dig_info *dig = &rtwdev->dig;
@@ -5690,38 +5699,47 @@ void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
}
#define IGI_RSSI_MIN 10
+#define ABS_IGI_MIN 0xc
void rtw89_phy_dig(struct rtw89_dev *rtwdev)
{
struct rtw89_dig_info *dig = &rtwdev->dig;
bool is_linked = rtwdev->total_sta_assoc > 0;
+ u8 igi_min;
if (unlikely(dig->bypass_dig)) {
dig->bypass_dig = false;
return;
}
+ rtw89_phy_dig_update_rssi_info(rtwdev);
+
if (!dig->is_linked_pre && is_linked) {
rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
rtw89_phy_dig_update_para(rtwdev);
+ dig->igi_fa_rssi = dig->igi_rssi;
} else if (dig->is_linked_pre && !is_linked) {
rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n");
rtw89_phy_dig_update_para(rtwdev);
+ dig->igi_fa_rssi = dig->igi_rssi;
}
dig->is_linked_pre = is_linked;
rtw89_phy_dig_igi_offset_by_env(rtwdev);
- rtw89_phy_dig_update_rssi_info(rtwdev);
- dig->dyn_igi_min = (dig->igi_rssi > IGI_RSSI_MIN) ?
- dig->igi_rssi - IGI_RSSI_MIN : 0;
- dig->dyn_igi_max = dig->dyn_igi_min + IGI_OFFSET_MAX;
- dig->igi_fa_rssi = dig->dyn_igi_min + dig->fa_rssi_ofst;
+ igi_min = max_t(int, dig->igi_rssi - IGI_RSSI_MIN, 0);
+ dig->dyn_igi_max = min(igi_min + IGI_OFFSET_MAX, igi_max_performance_mode);
+ dig->dyn_igi_min = max(igi_min, ABS_IGI_MIN);
- dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
- dig->dyn_igi_max);
+ if (dig->dyn_igi_max >= dig->dyn_igi_min) {
+ dig->igi_fa_rssi += dig->fa_rssi_ofst;
+ dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
+ dig->dyn_igi_max);
+ } else {
+ dig->igi_fa_rssi = dig->dyn_igi_max;
+ }
rtw89_debug(rtwdev, RTW89_DBG_DIG,
- "rssi=%03d, dyn(max,min)=(%d,%d), final_rssi=%d\n",
+ "rssi=%03d, dyn_joint(max,min)=(%d,%d), final_rssi=%d\n",
dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
dig->igi_fa_rssi);
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index d8df553b9cb0..6dd8ec46939a 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -894,7 +894,7 @@ void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta);
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev);
-void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
+void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
u32 changed);
void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
@@ -907,22 +907,28 @@ int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
unsigned int ms);
int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
enum rtw89_tssi_mode tssi_mode,
unsigned int ms);
int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 92074b73ebeb..aebd6404f802 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -98,10 +98,10 @@ static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif);
}
-static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, u8 mac_id)
+static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
struct rtw89_lps_parm lps_param = {
- .macid = mac_id,
+ .macid = rtwvif->mac_id,
.psmode = RTW89_MAC_AX_PS_MODE_ACTIVE,
.lastrpwm = RTW89_LAST_RPWM_ACTIVE,
};
@@ -109,6 +109,7 @@ static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, u8 mac_id)
rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
rtw89_fw_leave_lps_check(rtwdev, 0);
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
+ rtw89_chip_digital_pwr_comp(rtwdev, rtwvif->phy_idx);
}
void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
@@ -137,7 +138,7 @@ static void rtw89_leave_lps_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwv
rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
return;
- __rtw89_leave_lps(rtwdev, rtwvif->mac_id);
+ __rtw89_leave_lps(rtwdev, rtwvif);
}
void rtw89_leave_lps(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 7df36f3bff0b..69678eab2309 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -107,6 +107,15 @@
#define B_AX_DBG_SEL0_16BIT BIT(11)
#define B_AX_DBG_SEL0 GENMASK(7, 0)
+#define R_AX_GPIO_EXT_CTRL 0x0060
+#define B_AX_GPIO_MOD_15_TO_8_MASK GENMASK(31, 24)
+#define B_AX_GPIO_MOD_9 BIT(25)
+#define B_AX_GPIO_IO_SEL_15_TO_8_MASK GENMASK(23, 16)
+#define B_AX_GPIO_IO_SEL_9 BIT(17)
+#define B_AX_GPIO_OUT_15_TO_8_MASK GENMASK(15, 8)
+#define B_AX_GPIO_IN_15_TO_8_MASK GENMASK(7, 0)
+#define B_AX_GPIO_IN_9 BIT(1)
+
#define R_AX_SYS_SDIO_CTRL 0x0070
#define B_AX_PCIE_DIS_L2_CTRL_LDO_HCI BIT(15)
#define B_AX_PCIE_DIS_WLSUS_AFT_PDN BIT(14)
@@ -267,6 +276,9 @@
#define R_AX_GPIO0_7_FUNC_SEL 0x02D0
+#define R_AX_GPIO8_15_FUNC_SEL 0x02D4
+#define B_AX_PINMUX_GPIO9_FUNC_SEL_MASK GENMASK(7, 4)
+
#define R_AX_EECS_EESK_FUNC_SEL 0x02D8
#define B_AX_PINMUX_EESK_FUNC_SEL_MASK GENMASK(7, 4)
@@ -706,6 +718,14 @@
B_AX_HDT_CHANNEL_DMA_ERR_INT_EN | \
B_AX_HDT_TOTAL_LEN_ERR_INT_EN | \
B_AX_HDT_DMA_PROCESS_ERR_INT_EN)
+#define B_AX_HOST_DISP_IMR_SET_V01 (B_AX_HDT_CHANNEL_DIFF_ERR_INT_EN | \
+ B_AX_HDT_PAYLOAD_OVERFLOW_INT_EN | \
+ B_AX_HDT_PAYLOAD_UNDERFLOW_INT_EN | \
+ B_AX_HDT_CHANNEL_DMA_ERR_INT_EN | \
+ B_AX_HDT_TOTAL_LEN_ERR_INT_EN | \
+ B_AX_HDT_DMA_PROCESS_ERR_INT_EN | \
+ B_AX_HDT_RX_WRITE_OVERFLOW_INT_EN | \
+ B_AX_HDT_RX_WRITE_UNDERFLOW_INT_EN)
#define B_AX_HR_WRFF_UNDERFLOW_ERR_INT_EN BIT(31)
#define B_AX_HR_WRFF_OVERFLOW_ERR_INT_EN BIT(30)
@@ -1096,6 +1116,7 @@
#define B_AX_WDE_BUFMGN_FRZTMR_MODE BIT(0)
#define R_AX_WDE_ERR_IMR 0x8C38
+#define B_AX_WDE_DATCHN_UAPG_ERR_INT_EN BIT(30)
#define B_AX_WDE_DATCHN_RRDY_ERR_INT_EN BIT(27)
#define B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN BIT(26)
#define B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN BIT(25)
@@ -1135,6 +1156,29 @@
B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN)
+#define B_AX_WDE_IMR_CLR_V01 (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_STRPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN | \
+ B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_RRDY_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_UAPG_ERR_INT_EN)
#define B_AX_WDE_IMR_SET (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \
B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN | \
B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
@@ -1154,6 +1198,28 @@
B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN)
+#define B_AX_WDE_IMR_SET_V01 (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_STRPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN | \
+ B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_RRDY_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN)
#define B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
#define B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
@@ -2440,6 +2506,10 @@
#define B_AX_RTS_TXTIME_TH_MASK GENMASK(15, 8)
#define B_AX_RTS_LEN_TH_MASK GENMASK(7, 0)
+#define R_AX_AGG_LEN_VHT_0 0xC618
+#define R_AX_AGG_LEN_VHT_0_C1 0xE618
+#define B_AX_AMPDU_MAX_LEN_VHT_MASK GENMASK(19, 0)
+
#define S_AX_CTS2S_TH_SEC_256B 1
#define R_AX_SIFS_SETTING 0xC624
#define R_AX_SIFS_SETTING_C1 0xE624
@@ -3098,7 +3168,9 @@
B_AX_OFDM_CCA_TIMEOUT_INT_EN | \
B_AX_DATA_ON_TIMEOUT_INT_EN | \
B_AX_STS_ON_TIMEOUT_INT_EN | \
- B_AX_CSI_ON_TIMEOUT_INT_EN)
+ B_AX_CSI_ON_TIMEOUT_INT_EN | \
+ B_AX_PHYINTF_TIMEOUT_THR_MSAK)
+#define B_AX_PHYINFO_IMR_SET (B_AX_PHY_TXON_TIMEOUT_INT_EN | 0x7)
#define R_AX_PHYINFO_ERR_ISR 0xCCFC
#define R_AX_PHYINFO_ERR_ISR_C1 0xECFC
@@ -3854,6 +3926,15 @@
#define R_BE_EFUSE_CTRL_1_V1 0x0034
#define B_BE_EF_DATA_MASK GENMASK(31, 0)
+#define R_BE_GPIO_EXT_CTRL 0x0060
+#define B_BE_GPIO_MOD_15_TO_8_MASK GENMASK(31, 24)
+#define B_BE_GPIO_MOD_9 BIT(25)
+#define B_BE_GPIO_IO_SEL_15_TO_8_MASK GENMASK(23, 16)
+#define B_BE_GPIO_IO_SEL_9 BIT(17)
+#define B_BE_GPIO_OUT_15_TO_8_MASK GENMASK(15, 8)
+#define B_BE_GPIO_IN_15_TO_8_MASK GENMASK(7, 0)
+#define B_BE_GPIO_IN_9 BIT(1)
+
#define R_BE_WL_BT_PWR_CTRL 0x0068
#define B_BE_ISO_BD2PP BIT(31)
#define B_BE_LDOV12B_EN BIT(30)
@@ -4299,6 +4380,9 @@
#define B_BE_REG_CK40M_EN BIT(1)
#define B_BE_REG_CK640M_EN BIT(0)
+#define R_BE_GPIO8_15_FUNC_SEL 0x02D4
+#define B_BE_PINMUX_GPIO9_FUNC_SEL_MASK GENMASK(7, 4)
+
#define R_BE_WLAN_XTAL_SI_CTRL 0x0270
#define B_BE_WL_XTAL_SI_CMD_POLL BIT(31)
#define B_BE_WL_XTAL_SI_CHIPID_MASK GENMASK(30, 28)
@@ -5964,6 +6048,9 @@
#define R_BE_WP_PAGE_INFO1 0xB7AC
#define B_BE_WP_AVAL_PG_MASK GENMASK(28, 16)
+#define R_BE_LTPC_T0_PATH0 0xBA28
+#define R_BE_LTPC_T0_PATH1 0xBB28
+
#define R_BE_CMAC_SHARE_FUNC_EN 0x0E000
#define B_BE_CMAC_SHARE_CRPRT BIT(31)
#define B_BE_CMAC_SHARE_EN BIT(30)
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index a251b0e3b16e..a7720a1f17a7 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -800,7 +800,7 @@ static bool __rtw89_reg_6ghz_tpe_recalc(struct rtw89_dev *rtwdev)
const struct rtw89_reg_6ghz_tpe *tmp;
const struct rtw89_chan *chan;
- chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
if (chan->band_type != RTW89_BAND_6G)
continue;
@@ -872,7 +872,7 @@ static bool __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev)
u8 index;
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
- chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
if (chan->band_type != RTW89_BAND_6G)
continue;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 40cf84a79c46..1679bd408ef3 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -185,6 +185,15 @@ static const struct rtw89_rrsr_cfgs rtw8851b_rrsr_cfgs = {
.rsc = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_RSC_MASK, 2},
};
+static const struct rtw89_rfkill_regs rtw8851b_rfkill_regs = {
+ .pinmux = {R_AX_GPIO8_15_FUNC_SEL,
+ B_AX_PINMUX_GPIO9_FUNC_SEL_MASK,
+ 0xf},
+ .mode = {R_AX_GPIO_EXT_CTRL + 2,
+ (B_AX_GPIO_MOD_9 | B_AX_GPIO_IO_SEL_9) >> 16,
+ 0x0},
+};
+
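The .mode entry programs GPIO9's mode and IO-select bits through a narrower access at R_AX_GPIO_EXT_CTRL + 2, which covers register bits 16..31, so the 32-bit masks are shifted down by 16 before use. A compile-time check that the shifted mask lands on the expected bits:

#define BIT(n)	(1u << (n))

#define B_AX_GPIO_MOD_9		BIT(25)
#define B_AX_GPIO_IO_SEL_9	BIT(17)

/* BIT(25)|BIT(17) seen through the +2 window becomes BIT(9)|BIT(1) */
_Static_assert(((B_AX_GPIO_MOD_9 | B_AX_GPIO_IO_SEL_9) >> 16) == 0x0202,
	       "mode/io-sel bits for GPIO9 in the +2 access window");

int main(void) { return 0; }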
static const struct rtw89_dig_regs rtw8851b_dig_regs = {
.seg0_pd_reg = R_SEG0R_PD_V1,
.pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
@@ -1578,28 +1587,31 @@ static void rtw8851b_rfk_init(struct rtw89_dev *rtwdev)
rtw8851b_aack(rtwdev);
rtw8851b_rck(rtwdev);
rtw8851b_dack(rtwdev);
- rtw8851b_rx_dck(rtwdev, RTW89_PHY_0);
+ rtw8851b_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
}
-static void rtw8851b_rfk_channel(struct rtw89_dev *rtwdev)
+static void rtw8851b_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+ enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
- rtw8851b_rx_dck(rtwdev, phy_idx);
- rtw8851b_iqk(rtwdev, phy_idx);
- rtw8851b_tssi(rtwdev, phy_idx, true);
- rtw8851b_dpk(rtwdev, phy_idx);
+ rtw8851b_rx_dck(rtwdev, phy_idx, chanctx_idx);
+ rtw8851b_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8851b_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw8851b_dpk(rtwdev, phy_idx, chanctx_idx);
}
static void rtw8851b_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8851b_tssi_scan(rtwdev, phy_idx);
+ rtw8851b_tssi_scan(rtwdev, phy_idx, chan);
}
-static void rtw8851b_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+static void rtw8851b_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool start)
{
- rtw8851b_wifi_scan_notify(rtwdev, start, RTW89_PHY_0);
+ rtw8851b_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx);
}
static void rtw8851b_rfk_track(struct rtw89_dev *rtwdev)
@@ -1801,7 +1813,7 @@ rtw8851b_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
static void rtw8851b_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
rtw89_phy_write_reg3_tbl(rtwdev, en ? &rtw8851b_btc_preagc_en_defs_tbl :
&rtw8851b_btc_preagc_dis_defs_tbl);
@@ -1824,7 +1836,7 @@ static void rtw8851b_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
static void rtw8851b_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
if (en) {
rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
@@ -1869,7 +1881,7 @@ static void rtw8851b_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
static void rtw8851b_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
enum rtw89_rf_path_bit rx_path)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u32 rst_mask0;
if (rx_path == RF_A) {
@@ -2375,9 +2387,11 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.get_thermal = rtw8851b_get_thermal,
.ctrl_btg_bt_rx = rtw8851b_ctrl_btg_bt_rx,
.query_ppdu = rtw8851b_query_ppdu,
+ .convert_rpl_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8851b_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8851b_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8851b_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = rtw8851b_pwr_on_func,
.pwr_off_func = rtw8851b_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -2452,6 +2466,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.dig_regs = &rtw8851b_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 0,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
@@ -2463,6 +2478,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
+ .hw_mgmt_tx_encrypt = false,
.rf_path_num = 1,
.tx_nss = 1,
.rx_nss = 1,
@@ -2524,6 +2540,8 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.rrsr_cfgs = &rtw8851b_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_MAP_V1, B_BSS_CLR_MAP_VLD0},
.bss_clr_map_reg = R_BSS_CLR_MAP_V1,
+ .rfkill_init = &rtw8851b_rfkill_regs,
+ .rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9},
.dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
index a221f94627f5..364e36354225 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
@@ -521,9 +521,10 @@ static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
}
static void _rx_dck_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool is_afe)
+ enum rtw89_rf_path path, bool is_afe,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[RX_DCK] ==== S%d RX DCK (%s / CH%d / %s / by %s)====\n", path,
@@ -574,7 +575,8 @@ static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 rf
_rxbb_ofst_swap(rtwdev, path, rf_mode);
}
-static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe)
+static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 rf_reg5;
u8 path;
@@ -584,7 +586,7 @@ static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_af
0x2, rtwdev->hal.cv);
for (path = 0; path < RF_PATH_NUM_8851B; path++) {
- _rx_dck_info(rtwdev, phy, path, is_afe);
+ _rx_dck_info(rtwdev, phy, path, is_afe, chanctx_idx);
rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
@@ -1481,9 +1483,9 @@ static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
}
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- u8 path)
+ u8 path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 idx = 0;
@@ -1586,10 +1588,11 @@ static void _iqk_init(struct rtw89_dev *rtwdev)
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
u32 backup_rf_val[RTW8851B_IQK_SS][BACKUP_RF_REGS_NR];
u32 backup_bb_val[BACKUP_BB_REGS_NR];
@@ -1602,7 +1605,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8851B_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
@@ -1618,9 +1621,10 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ bool force, enum rtw89_chanctx_idx chanctx_idx)
{
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
}
static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 *reg,
@@ -1746,9 +1750,9 @@ static void _dpk_init(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
}
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 kidx = dpk->cur_idx[path];
@@ -2449,7 +2453,8 @@ _error:
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u32 kip_bkup[RF_PATH_NUM_8851B][DPK_KIP_REG_NUM_8851B] = {};
@@ -2465,7 +2470,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
continue;
_dpk_bkup_kip(rtwdev, dpk_kip_reg, kip_bkup, path);
_dpk_bkup_rf(rtwdev, dpk_rf_reg, rf_bkup, path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
_dpk_init(rtwdev, path);
if (rtwdev->is_tssi_mode[path])
@@ -2505,13 +2510,14 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
_dpk_kip_pwr_clk_onoff(rtwdev, false);
}
-static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** 8851B DPK Start (Ver: 0x%x, Cv: %d) ******\n",
DPK_VER_8851B, rtwdev->hal.cv);
- _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
+ _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy), chanctx_idx);
}
static void _dpk_track(struct rtw89_dev *rtwdev)
@@ -2617,9 +2623,8 @@ static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
}
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_sys_defs_tbl);
@@ -2650,7 +2655,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define RTW8851B_TSSI_GET_VAL(ptr, idx) \
({ \
@@ -2664,7 +2669,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
__val; \
})
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -2755,9 +2759,8 @@ static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx
}
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
@@ -2766,9 +2769,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool all)
+ enum rtw89_rf_path path, bool all,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
@@ -2944,10 +2947,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u32 gidx, gidx_1st, gidx_2nd;
u8 ch = chan->channel;
s8 de_1st;
@@ -2980,10 +2982,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u32 tgidx, tgidx_1st, tgidx_2nd;
u8 ch = chan->channel;
s8 tde_1st;
@@ -3017,10 +3018,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
return val;
}
-static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
@@ -3033,7 +3034,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
for (i = RF_PATH_A; i < RTW8851B_TSSI_PATH_NR; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3049,8 +3050,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
_TSSI_DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3096,10 +3097,10 @@ static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_p
}
static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, enum rtw89_rf_path path)
+ enum rtw89_phy_idx phy, enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 channel = chan->channel;
u8 band;
@@ -3255,9 +3256,10 @@ void rtw8851b_dack(struct rtw89_dev *rtwdev)
_dac_cal(rtwdev, false);
}
-void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
@@ -3265,30 +3267,32 @@ void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
-void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
- _rx_dck(rtwdev, phy_idx, false);
+ _rx_dck(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
-void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
@@ -3297,7 +3301,7 @@ void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtwdev->dpk.is_dpk_enable = true;
rtwdev->dpk.is_dpk_reload_en = false;
- _dpk(rtwdev, phy_idx, false);
+ _dpk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
@@ -3308,9 +3312,11 @@ void rtw8851b_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
+void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_A);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_A, chanctx_idx);
u8 i;
rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
@@ -3319,26 +3325,26 @@ void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_e
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) {
- _tssi_set_sys(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
_tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
_tssi_set_dac_gain_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
_tssi_set_tssi_slope(rtwdev, phy, i);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 channel = chan->channel;
u32 i;
@@ -3348,20 +3354,21 @@ void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) {
- _tssi_set_sys(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_set_sys(rtwdev, phy, i, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, bool enable)
+ enum rtw89_phy_idx phy, bool enable,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 channel = chan->channel;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
@@ -3379,7 +3386,7 @@ static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x\n",
@@ -3391,12 +3398,13 @@ static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev,
}
void rtw8851b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
if (scan_start)
- rtw8851b_tssi_default_txagc(rtwdev, phy_idx, true);
+ rtw8851b_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx);
else
- rtw8851b_tssi_default_txagc(rtwdev, phy_idx, false);
+ rtw8851b_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx);
}
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h
index b66a23d6d367..ea7df628256b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h
@@ -12,15 +12,21 @@ void rtw8851b_lck_init(struct rtw89_dev *rtwdev);
void rtw8851b_lck_track(struct rtw89_dev *rtwdev);
void rtw8851b_rck(struct rtw89_dev *rtwdev);
void rtw8851b_dack(struct rtw89_dev *rtwdev);
-void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8851b_dpk_init(struct rtw89_dev *rtwdev);
-void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8851b_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en);
-void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
void rtw8851b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx);
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8851b_set_channel_rf(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx);
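The 8851B hunks above all follow one pattern: every RF-calibration entry point gains an explicit enum rtw89_chanctx_idx parameter, and each internal helper that used to call rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0) now resolves the channel from the index its caller passes down. The following is an illustrative sketch only, with simplified stand-in types (the real definitions live in rtw89's core.h); it shows why pushing the lookup to the caller lets the same helper serve any channel context.

/*
 * Minimal sketch -- stand-in types, not the rtw89 API.
 */
#include <stdio.h>

enum rtw89_chanctx_idx { RTW89_CHANCTX_0, RTW89_CHANCTX_1, NUM_CHANCTX };

struct rtw89_chan { int channel; };

struct rtw89_dev {
	/* one channel definition per context */
	struct rtw89_chan chanctx[NUM_CHANCTX];
};

/* Stand-in for rtw89_chan_get(): resolve a context index to its channel. */
static const struct rtw89_chan *chan_get(struct rtw89_dev *rtwdev,
					 enum rtw89_chanctx_idx idx)
{
	return &rtwdev->chanctx[idx];
}

/*
 * Old shape: the helper fetched entity 0 itself, so calibration always
 * ran against the first channel context.  New shape: the caller threads
 * the context index down, as in the _rx_dck_info() hunk above.
 */
static void rx_dck_info(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx idx)
{
	const struct rtw89_chan *chan = chan_get(rtwdev, idx);

	printf("[RX_DCK] calibrating CH%d\n", chan->channel);
}

int main(void)
{
	struct rtw89_dev dev = { .chanctx = { { 6 }, { 149 } } };

	rx_dck_info(&dev, RTW89_CHANCTX_0);	/* CH6 */
	rx_dck_info(&dev, RTW89_CHANCTX_1);	/* CH149 */
	return 0;
}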
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 08e148328c62..dde96bd63021 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -337,6 +337,11 @@ static const struct rtw89_pwr_cfg rtw8852a_pwroff[] = {
PWR_INTF_MSK_PCIE,
PWR_BASE_MAC,
PWR_CMD_WRITE, BIT(0), 0},
+ {0x0092,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_PCIE,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(4), BIT(4)},
{0x0005,
PWR_CV_MSK_ALL,
PWR_INTF_MSK_PCIE,
@@ -478,6 +483,15 @@ static const struct rtw89_rrsr_cfgs rtw8852a_rrsr_cfgs = {
.rsc = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_RSC_MASK, 2},
};
+static const struct rtw89_rfkill_regs rtw8852a_rfkill_regs = {
+ .pinmux = {R_AX_GPIO8_15_FUNC_SEL,
+ B_AX_PINMUX_GPIO9_FUNC_SEL_MASK,
+ 0xf},
+ .mode = {R_AX_GPIO_EXT_CTRL + 2,
+ (B_AX_GPIO_MOD_9 | B_AX_GPIO_IO_SEL_9) >> 16,
+ 0x0},
+};
+
static const struct rtw89_dig_regs rtw8852a_dig_regs = {
.seg0_pd_reg = R_SEG0R_PD,
.pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
@@ -1332,29 +1346,32 @@ static void rtw8852a_rfk_init(struct rtw89_dev *rtwdev)
rtwdev->is_tssi_mode[RF_PATH_B] = false;
rtw8852a_rck(rtwdev);
- rtw8852a_dack(rtwdev);
- rtw8852a_rx_dck(rtwdev, RTW89_PHY_0, true);
+ rtw8852a_dack(rtwdev, RTW89_CHANCTX_0);
+ rtw8852a_rx_dck(rtwdev, RTW89_PHY_0, true, RTW89_CHANCTX_0);
}
-static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev)
+static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+ enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
- rtw8852a_rx_dck(rtwdev, phy_idx, true);
- rtw8852a_iqk(rtwdev, phy_idx);
- rtw8852a_tssi(rtwdev, phy_idx);
- rtw8852a_dpk(rtwdev, phy_idx);
+ rtw8852a_rx_dck(rtwdev, phy_idx, true, chanctx_idx);
+ rtw8852a_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8852a_tssi(rtwdev, phy_idx, chanctx_idx);
+ rtw8852a_dpk(rtwdev, phy_idx, chanctx_idx);
}
static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8852a_tssi_scan(rtwdev, phy_idx);
+ rtw8852a_tssi_scan(rtwdev, phy_idx, chan);
}
-static void rtw8852a_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+static void rtw8852a_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool start)
{
- rtw8852a_wifi_scan_notify(rtwdev, start, RTW89_PHY_0);
+ rtw8852a_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx);
}
static void rtw8852a_rfk_track(struct rtw89_dev *rtwdev)
@@ -1534,10 +1551,8 @@ static void rtw8852a_start_pmac_tx(struct rtw89_dev *rtwdev,
void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852a_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
-
if (!tx_info->en_pmac_tx) {
rtw8852a_stop_pmac_tx(rtwdev, tx_info, idx);
rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx);
@@ -1559,7 +1574,7 @@ void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
struct rtw8852a_bb_pmac_info tx_info = {0};
@@ -1569,7 +1584,7 @@ void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
tx_info.tx_cnt = tx_cnt;
tx_info.period = period;
tx_info.tx_time = tx_time;
- rtw8852a_bb_set_pmac_tx(rtwdev, &tx_info, idx);
+ rtw8852a_bb_set_pmac_tx(rtwdev, &tx_info, idx, chan);
}
void rtw8852a_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
@@ -2098,9 +2113,11 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.get_thermal = rtw8852a_get_thermal,
.ctrl_btg_bt_rx = rtw8852a_ctrl_btg_bt_rx,
.query_ppdu = rtw8852a_query_ppdu,
+ .convert_rpl_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852a_ctrl_nbtg_bt_tx,
.cfg_txrx_path = NULL,
.set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = NULL,
.pwr_off_func = NULL,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -2167,6 +2184,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.dig_regs = &rtw8852a_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 1,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
@@ -2178,6 +2196,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
+ .hw_mgmt_tx_encrypt = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
@@ -2240,6 +2259,8 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.rrsr_cfgs = &rtw8852a_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_MAP, B_BSS_CLR_MAP_VLD0},
.bss_clr_map_reg = R_BSS_CLR_MAP,
+ .rfkill_init = &rtw8852a_rfkill_regs,
+ .rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9},
.dma_ch_mask = 0,
.edcca_regs = &rtw8852a_edcca_regs,
#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.h b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
index ea82fed7b7be..d6c1acd09238 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
@@ -97,10 +97,10 @@ extern const struct rtw89_chip_info rtw8852a_chip_info;
void rtw8852a_bb_set_plcp_tx(struct rtw89_dev *rtwdev);
void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852a_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx);
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan);
void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx);
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan);
void rtw8852a_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
enum rtw89_phy_idx idx);
void rtw8852a_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
index d86429e4a35f..9db8713ac99b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
@@ -493,11 +493,12 @@ static void _dack(struct rtw89_dev *rtwdev)
_dack_s1(rtwdev);
}
-static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
+static void _dac_cal(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dack_info *dack = &rtwdev->dack;
u32 rf0_0, rf1_0;
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB, chanctx_idx);
dack->dack_done = false;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
@@ -799,12 +800,13 @@ static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
}
static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path, u8 ktype)
+ enum rtw89_phy_idx phy_idx, u8 path, u8 ktype,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
bool fail = false;
u32 iqk_cmd = 0x0;
- u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path);
+ u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path, chanctx_idx);
u32 addr_rfc_ctl = 0x0;
if (path == RF_PATH_A)
@@ -888,7 +890,8 @@ static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
}
static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
static const u32 rxgn_a[4] = {0x18C, 0x1A0, 0x28C, 0x2A0};
@@ -927,7 +930,7 @@ static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp);
rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN, 0x1);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
- fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK, chanctx_idx);
rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail);
}
@@ -952,7 +955,8 @@ static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
}
static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 group = 0x0;
@@ -991,7 +995,7 @@ static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
B_CFIR_LUT_GP, group);
rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
- fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK, chanctx_idx);
switch (iqk_info->iqk_band[path]) {
case RTW89_BAND_2G:
@@ -1040,7 +1044,8 @@ static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
}
static bool _txk_group_sel(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
static const u32 a_txgain[4] = {0xE466, 0x646D, 0xE4E2, 0x64ED};
static const u32 g_txgain[4] = {0x60e8, 0x60f0, 0x61e8, 0x61ED};
@@ -1083,7 +1088,7 @@ static bool _txk_group_sel(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
B_CFIR_LUT_GP, gp);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
- fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK, chanctx_idx);
rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(8 + gp + path * 4), fail);
}
@@ -1098,7 +1103,8 @@ static bool _txk_group_sel(struct rtw89_dev *rtwdev,
}
static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 group = 0x2;
@@ -1131,7 +1137,7 @@ static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, group);
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
- fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK, chanctx_idx);
if (!fail) {
tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
iqk_info->nb_txcfir[path] = tmp | 0x2;
@@ -1179,7 +1185,8 @@ static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
}
static bool _iqk_lok(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 rf0 = 0x0;
@@ -1210,11 +1217,11 @@ static bool _iqk_lok(struct rtw89_dev *rtwdev,
rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
- tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE);
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE, chanctx_idx);
iqk_info->lok_cor_fail[0][path] = tmp;
fsleep(10);
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
- tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE);
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE, chanctx_idx);
iqk_info->lok_fin_fail[0][path] = tmp;
fail = _lok_finetune_check(rtwdev, path);
return fail;
@@ -1321,7 +1328,8 @@ static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
}
static
-void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
bool lok_is_fail = false;
@@ -1333,30 +1341,35 @@ void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
for (i = 0; i < 3; i++) {
_lok_res_table(rtwdev, path, ibias++);
_iqk_txk_setting(rtwdev, path);
- lok_is_fail = _iqk_lok(rtwdev, phy_idx, path);
+ lok_is_fail = _iqk_lok(rtwdev, phy_idx, path, chanctx_idx);
if (!lok_is_fail)
break;
}
if (iqk_info->is_nbiqk)
- iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
+ iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path,
+ chanctx_idx);
else
- iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);
+ iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path,
+ chanctx_idx);
_iqk_rxclk_setting(rtwdev, path);
_iqk_rxk_setting(rtwdev, path);
if (iqk_info->is_nbiqk || rtwdev->dbcc_en || iqk_info->iqk_band[path] == RTW89_BAND_2G)
- iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
+ iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path,
+ chanctx_idx);
else
- iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);
+ iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path,
+ chanctx_idx);
_iqk_info_iqk(rtwdev, phy_idx, path);
}
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, u8 path)
+ enum rtw89_phy_idx phy, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u32 reg_rf18 = 0x0, reg_35c = 0x0;
u8 idx = 0;
u8 get_empty_table = false;
@@ -1413,9 +1426,9 @@ static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
}
static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- u8 path)
+ u8 path, enum rtw89_chanctx_idx chanctx_idx)
{
- _iqk_by_path(rtwdev, phy_idx, path);
+ _iqk_by_path(rtwdev, phy_idx, path, chanctx_idx);
}
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
@@ -1513,7 +1526,8 @@ static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
rtw89_rfk_parser(rtwdev, tbl);
}
-static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path)
+static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 phy_idx = 0x0;
@@ -1525,10 +1539,10 @@ static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path)
else
phy_idx = RTW89_PHY_1;
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_iqk_macbb_setting(rtwdev, phy_idx, path);
_iqk_preset(rtwdev, path);
- _iqk_start_iqk(rtwdev, phy_idx, path);
+ _iqk_start_iqk(rtwdev, phy_idx, path, chanctx_idx);
_iqk_restore(rtwdev, path);
_iqk_afebb_restore(rtwdev, phy_idx, path);
}
@@ -1607,12 +1621,13 @@ static void _iqk_init(struct rtw89_dev *rtwdev)
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
u32 backup_rf_val[RTW8852A_IQK_SS][BACKUP_RF_REGS_NR];
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
@@ -1622,12 +1637,12 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8852A_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
_iqk_macbb_setting(rtwdev, phy_idx, path);
_iqk_preset(rtwdev, path);
- _iqk_start_iqk(rtwdev, phy_idx, path);
+ _iqk_start_iqk(rtwdev, phy_idx, path, chanctx_idx);
_iqk_restore(rtwdev, path);
_iqk_afebb_restore(rtwdev, phy_idx, path);
_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
@@ -1635,18 +1650,19 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
switch (_kpath(rtwdev, phy_idx)) {
case RF_A:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
break;
case RF_B:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
case RF_AB:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
default:
break;
@@ -1656,9 +1672,10 @@ static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool forc
#define RXDCK_VER_8852A 0xe
static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool is_afe)
+ enum rtw89_rf_path path, bool is_afe,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path);
+ u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path, chanctx_idx);
u32 ori_val;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
@@ -1704,7 +1721,7 @@ static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- bool is_afe)
+ bool is_afe, enum rtw89_chanctx_idx chanctx_idx)
{
u8 path, kpath, dck_tune;
u32 rf_reg5;
@@ -1732,7 +1749,7 @@ static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
- _set_rx_dck(rtwdev, phy, path, is_afe);
+ _set_rx_dck(rtwdev, phy, path, is_afe, chanctx_idx);
rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
@@ -1800,9 +1817,10 @@ static void _dpk_reload_kip(struct rtw89_dev *rtwdev, u32 *reg,
}
static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, enum rtw8852a_dpk_id id)
+ enum rtw89_rf_path path, enum rtw8852a_dpk_id id,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path);
+ u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path, chanctx_idx);
u16 dpk_cmd = 0x0;
u32 val;
int ret;
@@ -1841,18 +1859,19 @@ static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _dpk_rx_dck(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3);
- _set_rx_dck(rtwdev, phy, path, false);
+ _set_rx_dck(rtwdev, phy, path, false, chanctx_idx);
}
static void _dpk_information(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 kidx = dpk->cur_idx[path];
dpk->bp[path][kidx].band = chan->band_type;
@@ -1967,7 +1986,8 @@ static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u8 cur_rxbb;
@@ -1997,7 +2017,7 @@ static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
- _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);
+ _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK, chanctx_idx);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD));
@@ -2186,10 +2206,11 @@ static bool _dpk_sync_check(struct rtw89_dev *rtwdev,
}
static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 kidx)
+ enum rtw89_rf_path path, u8 kidx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
_dpk_tpg_sel(rtwdev, path, kidx);
- _dpk_one_shot(rtwdev, phy, path, SYNC);
+ _dpk_one_shot(rtwdev, phy, path, SYNC, chanctx_idx);
return _dpk_sync_check(rtwdev, path); /*1= fail*/
}
@@ -2242,10 +2263,10 @@ static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
static void _dpk_gainloss(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy, enum rtw89_rf_path path,
- u8 kidx)
+ u8 kidx, enum rtw89_chanctx_idx chanctx_idx)
{
_dpk_table_select(rtwdev, path, kidx, 1);
- _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
+ _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS, chanctx_idx);
}
#define DPK_TXAGC_LOWER 0x2e
@@ -2322,7 +2343,7 @@ static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
- bool loss_only)
+ bool loss_only, enum rtw89_chanctx_idx chanctx_idx)
{
#define DPK_AGC_ADJ_LMT 6
#define DPK_DGAIN_UPPER 1922
@@ -2330,7 +2351,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
#define DPK_RXBB_UPPER 0x1f
#define DPK_RXBB_LOWER 0
#define DPK_GL_CRIT 7
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
u8 agc_cnt = 0;
bool limited_rxbb = false;
@@ -2344,7 +2365,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
do {
switch (step) {
case DPK_AGC_STEP_SYNC_DGAIN:
- if (_dpk_sync(rtwdev, phy, path, kidx)) {
+ if (_dpk_sync(rtwdev, phy, path, kidx, chanctx_idx)) {
tmp_txagc = DPK_TXAGC_INVAL;
goout = true;
break;
@@ -2380,7 +2401,8 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
_dpk_bypass_rxcfir(rtwdev, path, true);
else
- _dpk_lbk_rxiqk(rtwdev, phy, path);
+ _dpk_lbk_rxiqk(rtwdev, phy, path,
+ chanctx_idx);
}
if (dgain > DPK_DGAIN_UPPER || dgain < DPK_DGAIN_LOWER)
step = DPK_AGC_STEP_SYNC_DGAIN;
@@ -2391,7 +2413,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
break;
case DPK_AGC_STEP_GAIN_LOSS_IDX:
- _dpk_gainloss(rtwdev, phy, path, kidx);
+ _dpk_gainloss(rtwdev, phy, path, kidx, chanctx_idx);
tmp_gl_idx = _dpk_gainloss_read(rtwdev);
if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
@@ -2475,11 +2497,12 @@ static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
}
static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 kidx, u8 gain)
+ enum rtw89_rf_path path, u8 kidx, u8 gain,
+ enum rtw89_chanctx_idx chanctx_idx)
{
_dpk_set_mdpd_para(rtwdev, 0x0);
_dpk_table_select(rtwdev, path, kidx, 1);
- _dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
+ _dpk_one_shot(rtwdev, phy, path, MDPK_IDL, chanctx_idx);
}
static void _dpk_fill_result(struct rtw89_dev *rtwdev,
@@ -2518,10 +2541,10 @@ static void _dpk_fill_result(struct rtw89_dev *rtwdev,
}
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
bool is_reload = false;
u8 idx, cur_band, cur_ch;
@@ -2545,7 +2568,8 @@ static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 gain)
+ enum rtw89_rf_path path, u8 gain,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 txagc = 0, kidx = dpk->cur_idx[path];
@@ -2558,16 +2582,16 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
_rf_direct_cntrl(rtwdev, path, false);
txagc = _dpk_set_tx_pwr(rtwdev, gain, path);
_dpk_rf_setting(rtwdev, gain, path, kidx);
- _dpk_rx_dck(rtwdev, phy, path);
+ _dpk_rx_dck(rtwdev, phy, path, chanctx_idx);
_dpk_kip_setting(rtwdev, path, kidx);
_dpk_manual_txcfir(rtwdev, path, true);
- txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
+ txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx);
if (txagc == DPK_TXAGC_INVAL)
is_fail = true;
_dpk_get_thermal(rtwdev, kidx, path);
- _dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
+ _dpk_idl_mpa(rtwdev, phy, path, kidx, gain, chanctx_idx);
rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
_dpk_fill_result(rtwdev, path, kidx, gain, txagc);
_dpk_manual_txcfir(rtwdev, path, false);
@@ -2584,7 +2608,8 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
@@ -2599,7 +2624,8 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
if (!(kpath & BIT(path)))
continue;
- reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path,
+ chanctx_idx);
if (!reloaded[path] && dpk->bp[path][0].ch != 0)
dpk->cur_idx[path] = !dpk->cur_idx[path];
else
@@ -2624,7 +2650,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
_dpk_tssi_pause(rtwdev, path, true);
_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
}
_dpk_bb_afe_setting(rtwdev, phy, path, kpath);
@@ -2633,7 +2659,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
if (!(kpath & BIT(path)) || reloaded[path])
continue;
- is_fail = _dpk_main(rtwdev, phy, path, 1);
+ is_fail = _dpk_main(rtwdev, phy, path, 1, chanctx_idx);
_dpk_onoff(rtwdev, path, is_fail);
}
@@ -2652,10 +2678,11 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
}
}
-static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_fem_info *fem = &rtwdev->fem;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK,
@@ -2682,17 +2709,19 @@ static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
}
-static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool force, enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
RTW8852A_DPK_VER, rtwdev->hal.cv,
RTW8852A_RF_REL_VERSION);
- if (_dpk_bypass_check(rtwdev, phy))
+ if (_dpk_bypass_check(rtwdev, phy, chanctx_idx))
_dpk_force_bypass(rtwdev, phy);
else
- _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
+ _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy),
+ chanctx_idx);
}
static void _dpk_onoff(struct rtw89_dev *rtwdev,
@@ -2815,9 +2844,8 @@ static void _dpk_track(struct rtw89_dev *rtwdev)
}
static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
if (band == RTW89_BAND_2G)
@@ -2826,9 +2854,9 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
}
-static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_sys_defs_tbl);
@@ -2838,9 +2866,9 @@ static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
@@ -2869,7 +2897,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define __get_val(ptr, idx) \
({ \
@@ -2883,7 +2911,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
__val; \
})
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -3076,9 +3103,8 @@ static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
}
static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 subband = chan->subband_type;
switch (subband) {
@@ -3252,10 +3278,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
s8 de_1st = 0;
@@ -3290,10 +3315,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
s8 tde_1st = 0;
@@ -3328,11 +3352,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
}
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy)
+ enum rtw89_phy_idx phy, const struct rtw89_chan *chan)
{
#define __DE_MASK 0x003ff000
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
static const u32 r_cck_long[RF_PATH_NUM_8852A] = {0x5858, 0x7858};
static const u32 r_cck_short[RF_PATH_NUM_8852A] = {0x5860, 0x7860};
static const u32 r_mcs_20m[RF_PATH_NUM_8852A] = {0x5838, 0x7838};
@@ -3352,7 +3375,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
for (i = 0; i < RF_PATH_NUM_8852A; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3368,8 +3391,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
rtw89_phy_read32_mask(rtwdev, r_cck_long[i],
__DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3458,10 +3481,10 @@ static void _tssi_track(struct rtw89_dev *rtwdev)
}
}
-static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel, ch_tmp;
u8 bw = chan->band_width;
u8 band = chan->band_type;
@@ -3497,24 +3520,25 @@ static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- u8 path, s16 pwr_dbm, u8 enable)
+ u8 path, s16 pwr_dbm, u8 enable, const struct rtw89_chan *chan)
{
rtw8852a_bb_set_plcp_tx(rtwdev);
rtw8852a_bb_cfg_tx_path(rtwdev, path);
rtw8852a_bb_set_power(rtwdev, pwr_dbm, phy);
- rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy);
+ rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy, chan);
}
-static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
const struct rtw89_chip_info *mac_reg = rtwdev->chip;
u8 ch = chan->channel, ch_tmp;
u8 bw = chan->band_width;
u8 band = chan->band_type;
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0, chanctx_idx);
s8 power;
s16 xdbm;
u32 i, tx_counter = 0;
@@ -3546,9 +3570,9 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy));
tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
- _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true);
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true, chan);
mdelay(15);
- _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false);
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false, chan);
tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD) -
tx_counter;
@@ -3600,19 +3624,21 @@ void rtw8852a_rck(struct rtw89_dev *rtwdev)
_rck(rtwdev, path);
}
-void rtw8852a_dack(struct rtw89_dev *rtwdev)
+void rtw8852a_dack(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
- _dac_cal(rtwdev, false);
+ _dac_cal(rtwdev, false, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
-void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
@@ -3620,34 +3646,35 @@ void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
_iqk_init(rtwdev);
if (rtwdev->dbcc_en)
- _iqk_dbcc(rtwdev, phy_idx);
+ _iqk_dbcc(rtwdev, phy_idx, chanctx_idx);
else
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- bool is_afe)
+ bool is_afe, enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
- _rx_dck(rtwdev, phy_idx, is_afe);
+ _rx_dck(rtwdev, phy_idx, is_afe, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
-void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
@@ -3655,7 +3682,7 @@ void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtwdev->dpk.is_dpk_enable = true;
rtwdev->dpk.is_dpk_reload_en = false;
- _dpk(rtwdev, phy_idx, false);
+ _dpk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
@@ -3666,8 +3693,10 @@ void rtw8852a_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 i;
rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
@@ -3676,26 +3705,27 @@ void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy);
- _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, chan);
+ _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
_tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
_tssi_set_dac_gain_tbl(rtwdev, phy, i);
_tssi_slope_cal_org(rtwdev, phy, i);
_tssi_set_rf_gap_tbl(rtwdev, phy, i);
_tssi_set_slope(rtwdev, phy, i);
- _tssi_pak(rtwdev, phy, i);
+ _tssi_pak(rtwdev, phy, i, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
- _tssi_high_power(rtwdev, phy);
- _tssi_pre_tx(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
+ _tssi_high_power(rtwdev, phy, chan);
+ _tssi_pre_tx(rtwdev, phy, chanctx_idx);
}
-void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
u8 i;
@@ -3710,14 +3740,14 @@ void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
- _tssi_pak(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
+ _tssi_pak(rtwdev, phy, i, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
void rtw8852a_tssi_track(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h
index fa058ccc8616..8761f2cc9359 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h
@@ -8,14 +8,19 @@
#include "core.h"
void rtw8852a_rck(struct rtw89_dev *rtwdev);
-void rtw8852a_dack(struct rtw89_dev *rtwdev);
-void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852a_dack(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- bool is_afe);
-void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+ bool is_afe, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852a_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
-void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
void rtw8852a_tssi_track(struct rtw89_dev *rtwdev);
void rtw8852a_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
enum rtw89_phy_idx phy_idx);
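The 8852A hunks also change the shape of the chip-level callbacks: rfk_channel and rfk_scan now receive a struct rtw89_vif * and read the PHY and channel context from it, rather than hardcoding RTW89_PHY_0. A minimal sketch of that dispatch pattern, again with invented stand-in types (the real table is struct rtw89_chip_ops):

/* Sketch only -- simplified stand-ins for the rtw89 ops table. */
#include <stdio.h>

enum phy_idx { PHY_0, PHY_1 };
enum chanctx_idx { CHANCTX_0, CHANCTX_1 };

struct vif {
	enum phy_idx phy_idx;		/* PHY this vif is bound to */
	enum chanctx_idx chanctx_idx;	/* channel context this vif uses */
};

struct dev;

struct chip_ops {
	/* was: void (*rfk_channel)(struct dev *); now takes the vif */
	void (*rfk_channel)(struct dev *d, struct vif *v);
};

struct dev { const struct chip_ops *ops; };

static void rfk_channel_8852a(struct dev *d, struct vif *v)
{
	/* per-vif indices replace the fixed PHY_0 / entity-0 assumption */
	printf("RFK on phy=%d chanctx=%d\n", v->phy_idx, v->chanctx_idx);
}

static const struct chip_ops ops_8852a = { .rfk_channel = rfk_channel_8852a };

int main(void)
{
	struct dev d = { .ops = &ops_8852a };
	struct vif v = { .phy_idx = PHY_1, .chanctx_idx = CHANCTX_1 };

	d.ops->rfk_channel(&d, &v);	/* dispatch with per-vif state */
	return 0;
}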
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index a22847a311ad..12be52f76427 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -150,6 +150,15 @@ static const struct rtw89_rrsr_cfgs rtw8852b_rrsr_cfgs = {
.rsc = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_RSC_MASK, 2},
};
+static const struct rtw89_rfkill_regs rtw8852b_rfkill_regs = {
+ .pinmux = {R_AX_GPIO8_15_FUNC_SEL,
+ B_AX_PINMUX_GPIO9_FUNC_SEL_MASK,
+ 0xf},
+ .mode = {R_AX_GPIO_EXT_CTRL + 2,
+ (B_AX_GPIO_MOD_9 | B_AX_GPIO_IO_SEL_9) >> 16,
+ 0x0},
+};
+
static const struct rtw89_dig_regs rtw8852b_dig_regs = {
.seg0_pd_reg = R_SEG0R_PD_V1,
.pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
@@ -549,29 +558,32 @@ static void rtw8852b_rfk_init(struct rtw89_dev *rtwdev)
rtw8852b_dpk_init(rtwdev);
rtw8852b_rck(rtwdev);
- rtw8852b_dack(rtwdev);
- rtw8852b_rx_dck(rtwdev, RTW89_PHY_0);
+ rtw8852b_dack(rtwdev, RTW89_CHANCTX_0);
+ rtw8852b_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
}
-static void rtw8852b_rfk_channel(struct rtw89_dev *rtwdev)
+static void rtw8852b_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+ enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
- rtw8852b_rx_dck(rtwdev, phy_idx);
- rtw8852b_iqk(rtwdev, phy_idx);
- rtw8852b_tssi(rtwdev, phy_idx, true);
- rtw8852b_dpk(rtwdev, phy_idx);
+ rtw8852b_rx_dck(rtwdev, phy_idx, chanctx_idx);
+ rtw8852b_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8852b_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw8852b_dpk(rtwdev, phy_idx, chanctx_idx);
}
static void rtw8852b_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8852b_tssi_scan(rtwdev, phy_idx);
+ rtw8852b_tssi_scan(rtwdev, phy_idx, chan);
}
-static void rtw8852b_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+static void rtw8852b_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool start)
{
- rtw8852b_wifi_scan_notify(rtwdev, start, RTW89_PHY_0);
+ rtw8852b_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx);
}
static void rtw8852b_rfk_track(struct rtw89_dev *rtwdev)
@@ -729,9 +741,11 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.get_thermal = rtw8852bx_get_thermal,
.ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = rtw8852bx_query_ppdu,
+ .convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi,
.ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = rtw8852b_pwr_on_func,
.pwr_off_func = rtw8852b_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -807,6 +821,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.dig_regs = &rtw8852b_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 0,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
@@ -818,6 +833,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
+ .hw_mgmt_tx_encrypt = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
@@ -880,6 +896,8 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.rrsr_cfgs = &rtw8852b_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_MAP_V1, B_BSS_CLR_MAP_VLD0},
.bss_clr_map_reg = R_BSS_CLR_MAP_V1,
+ .rfkill_init = &rtw8852b_rfkill_regs,
+ .rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9},
.dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
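The new rfkill_init/rfkill_get fields describe the hard-rfkill GPIO: rfkill_init programs the GPIO-9 pinmux and mode, and rfkill_get names the register and bit to sample. A hedged sketch of a reader, assuming common code polls the bit like this (the helper name and the active-low polarity are assumptions, not taken from this patch):

	static bool example_hw_rfkill_asserted(struct rtw89_dev *rtwdev)
	{
		const struct rtw89_reg_def *reg = &rtwdev->chip->rfkill_get;

		/* B_AX_GPIO_IN_9 samples the rfkill pin; assume active low */
		return rtw89_read32_mask(rtwdev, reg->addr, reg->mask) == 0;
	}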
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
index 1745c2882acf..ede0ca5426ae 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
@@ -1445,10 +1445,8 @@ static void rtw8852bx_start_pmac_tx(struct rtw89_dev *rtwdev,
static
void rtw8852bx_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852bx_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
-
if (!tx_info->en_pmac_tx) {
rtw8852bx_stop_pmac_tx(rtwdev, tx_info, idx);
rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx);
@@ -1473,7 +1471,7 @@ void rtw8852bx_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
static
void __rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
struct rtw8852bx_bb_pmac_info tx_info = {0};
@@ -1484,7 +1482,7 @@ void __rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
tx_info.period = period;
tx_info.tx_time = tx_time;
- rtw8852bx_bb_set_pmac_tx(rtwdev, &tx_info, idx);
+ rtw8852bx_bb_set_pmac_tx(rtwdev, &tx_info, idx, chan);
}
static
@@ -1623,9 +1621,9 @@ static void __rtw8852bx_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
static
void __rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path)
+ enum rtw89_rf_path_bit rx_path,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u32 rst_mask0;
u32 rst_mask1;
@@ -1713,9 +1711,10 @@ static void rtw8852bx_bb_ctrl_rf_mode_rx_path(struct rtw89_dev *rtwdev,
static void __rtw8852bx_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_rf_path_bit rx_path = hal->antenna_rx ? hal->antenna_rx : RF_AB;
- rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan);
rtw8852bx_bb_ctrl_rf_mode_rx_path(rtwdev, rx_path);
if (rtwdev->hal.rx_nss == 1) {
@@ -1948,6 +1947,19 @@ static void __rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev,
rtw8852bx_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
}
+static void __rtw8852bx_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ u8 delta = phy_ppdu->rpl_avg - phy_ppdu->rssi_avg;
+ u8 *rssi = phy_ppdu->rssi;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++)
+ rssi[i] += delta;
+
+ phy_ppdu->rssi_avg = phy_ppdu->rpl_avg;
+}
+
static int __rtw8852bx_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
@@ -2030,6 +2042,7 @@ const struct rtw8852bx_info rtw8852bx_info = {
.ctrl_nbtg_bt_tx = __rtw8852bx_ctrl_nbtg_bt_tx,
.ctrl_btg_bt_rx = __rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = __rtw8852bx_query_ppdu,
+ .convert_rpl_to_rssi = __rtw8852bx_convert_rpl_to_rssi,
.read_efuse = __rtw8852bx_read_efuse,
.read_phycap = __rtw8852bx_read_phycap,
.power_trim = __rtw8852bx_power_trim,
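Note that __rtw8852bx_convert_rpl_to_rssi relies on unsigned 8-bit wraparound: when rpl_avg < rssi_avg the delta "underflows", yet adding it to each per-path RSSI still shifts the value by the correct signed amount mod 256. A standalone toy program (not driver code) demonstrating the arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t rssi_avg = 80, rpl_avg = 76;
		uint8_t delta = rpl_avg - rssi_avg;	/* wraps to 252 */
		uint8_t rssi_path = 84;

		rssi_path += delta;	/* 84 + 252 = 336 mod 256 = 80 */
		printf("%u\n", (unsigned int)rssi_path);	/* 80: down by 4 */
		return 0;
	}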
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
index 801e7ab9f4fa..3dce5422f41e 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
@@ -121,13 +121,14 @@ struct rtw8852bx_info {
void (*bb_cfg_txrx_path)(struct rtw89_dev *rtwdev);
void (*bb_cfg_tx_path)(struct rtw89_dev *rtwdev, u8 tx_path);
void (*bb_ctrl_rx_path)(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path);
+ enum rtw89_rf_path_bit rx_path,
+ const struct rtw89_chan *chan);
void (*bb_set_plcp_tx)(struct rtw89_dev *rtwdev);
void (*bb_set_power)(struct rtw89_dev *rtwdev, s16 pwr_dbm,
enum rtw89_phy_idx idx);
void (*bb_set_pmac_pkt_tx)(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx);
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan);
void (*bb_backup_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
struct rtw8852bx_bb_tssi_bak *bak);
void (*bb_restore_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
@@ -145,6 +146,8 @@ struct rtw8852bx_info {
void (*query_ppdu)(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status);
+ void (*convert_rpl_to_rssi)(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu);
int (*read_efuse)(struct rtw89_dev *rtwdev, u8 *log_map,
enum rtw89_efuse_block block);
int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map);
@@ -207,9 +210,10 @@ void rtw8852bx_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path)
static inline
void rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path)
+ enum rtw89_rf_path_bit rx_path,
+ const struct rtw89_chan *chan)
{
- rtw8852bx_info.bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_info.bb_ctrl_rx_path(rtwdev, rx_path, chan);
}
static inline
@@ -228,9 +232,10 @@ void rtw8852bx_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
static inline
void rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
- rtw8852bx_info.bb_set_pmac_pkt_tx(rtwdev, enable, tx_cnt, period, tx_time, idx);
+ rtw8852bx_info.bb_set_pmac_pkt_tx(rtwdev, enable, tx_cnt, period, tx_time, idx,
+ chan);
}
static inline
@@ -291,6 +296,13 @@ void rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev,
}
static inline
+void rtw8852bx_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ rtw8852bx_info.convert_rpl_to_rssi(rtwdev, phy_ppdu);
+}
+
+static inline
int rtw8852bx_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
enum rtw89_efuse_block block)
{
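These static-inline wrappers exist so the 8852B and 8852BT front-ends share one implementation through the exported rtw8852bx_info ops table; each wrapper only forwards its arguments. The skeleton of the idiom, with illustrative names:

	struct demo_ops {
		void (*do_thing)(int arg);
	};

	extern const struct demo_ops demo_info;	/* one shared instance */

	static inline void demo_do_thing(int arg)
	{
		demo_info.do_thing(arg);	/* thin forwarding wrapper */
	}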
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
index 12354612441c..ef47a5facc83 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
@@ -1382,9 +1382,10 @@ static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u
_iqk_info_iqk(rtwdev, phy_idx, path);
}
-static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path)
+static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 reg_rf18;
u32 reg_35c;
@@ -1608,12 +1609,13 @@ static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
u32 backup_rf_val[RTW8852B_IQK_SS][BACKUP_RF_REGS_NR];
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
@@ -1623,7 +1625,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8852B_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
@@ -1638,20 +1640,21 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u8 kpath = _kpath(rtwdev, phy_idx);
switch (kpath) {
case RF_A:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
break;
case RF_B:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
case RF_AB:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
default:
break;
@@ -1761,9 +1764,9 @@ static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 kidx = dpk->cur_idx[path];
@@ -1786,9 +1789,10 @@ static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 kpath)
+ enum rtw89_rf_path path, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_defs_tbl);
@@ -1803,9 +1807,10 @@ static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 kpath)
+ enum rtw89_rf_path path, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_restore_defs_tbl);
@@ -2217,9 +2222,9 @@ static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
- bool loss_only)
+ bool loss_only, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 step = DPK_AGC_STEP_SYNC_DGAIN;
u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
u8 goout = 0, agc_cnt = 0, limited_rxbb = 0;
@@ -2416,9 +2421,9 @@ static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
bool is_reload = false;
u8 idx, cur_band, cur_ch;
@@ -2443,7 +2448,8 @@ static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 gain)
+ enum rtw89_rf_path path, u8 gain,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 txagc = 0x38, kidx = dpk->cur_idx[path];
@@ -2464,7 +2470,7 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
_dpk_kip_set_rxagc(rtwdev, phy, path);
_dpk_table_select(rtwdev, path, kidx, gain);
- txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
+ txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust txagc = 0x%x\n", txagc);
if (txagc == 0xff) {
@@ -2491,7 +2497,8 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120};
@@ -2503,7 +2510,8 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
if (dpk->is_dpk_reload_en) {
for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
- reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path,
+ chanctx_idx);
if (!reloaded[path] && dpk->bp[path][0].ch)
dpk->cur_idx[path] = !dpk->cur_idx[path];
else
@@ -2519,19 +2527,19 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
if (rtwdev->is_tssi_mode[path])
_dpk_tssi_pause(rtwdev, path, true);
}
- _dpk_bb_afe_setting(rtwdev, phy, path, kpath);
+ _dpk_bb_afe_setting(rtwdev, phy, path, kpath, chanctx_idx);
for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
- is_fail = _dpk_main(rtwdev, phy, path, 1);
+ is_fail = _dpk_main(rtwdev, phy, path, 1, chanctx_idx);
_dpk_onoff(rtwdev, path, is_fail);
}
- _dpk_bb_afe_restore(rtwdev, phy, path, kpath);
+ _dpk_bb_afe_restore(rtwdev, phy, path, kpath, chanctx_idx);
_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
@@ -2543,9 +2551,10 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
}
}
-static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_fem_info *fem = &rtwdev->fem;
if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
@@ -2577,17 +2586,18 @@ static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
}
-static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
RTW8852B_DPK_VER, rtwdev->hal.cv,
RTW8852B_RF_REL_VERSION);
- if (_dpk_bypass_check(rtwdev, phy))
+ if (_dpk_bypass_check(rtwdev, phy, chanctx_idx))
_dpk_force_bypass(rtwdev, phy);
else
- _dpk_cal_select(rtwdev, force, phy, RF_AB);
+ _dpk_cal_select(rtwdev, force, phy, RF_AB, chanctx_idx);
}
static void _dpk_track(struct rtw89_dev *rtwdev)
@@ -2722,9 +2732,8 @@ static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
if (band == RTW89_BAND_2G)
@@ -2734,9 +2743,8 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8852b_tssi_sys_defs_tbl);
@@ -2778,7 +2786,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define RTW8852B_TSSI_GET_VAL(ptr, idx) \
({ \
@@ -2792,7 +2800,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
__val; \
})
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -2944,9 +2951,8 @@ static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx
}
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A)
@@ -2960,9 +2966,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool all)
+ enum rtw89_rf_path path, bool all,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
const struct rtw89_rfk_tbl *tbl = NULL;
u8 ch = chan->channel;
@@ -3231,10 +3237,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
s8 de_1st;
@@ -3267,10 +3272,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
s8 tde_1st;
@@ -3304,10 +3308,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
return val;
}
-static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
@@ -3320,7 +3324,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3336,8 +3340,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
_TSSI_DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3383,10 +3387,10 @@ static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_p
}
static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, enum rtw89_rf_path path)
+ enum rtw89_phy_idx phy, enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 channel = chan->channel;
u8 band;
@@ -3420,7 +3424,7 @@ static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
- u8 enable)
+ u8 enable, const struct rtw89_chan *chan)
{
enum rtw89_rf_path_bit rx_path;
@@ -3436,11 +3440,11 @@ static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
if (enable) {
rtw8852bx_bb_set_plcp_tx(rtwdev);
rtw8852bx_bb_cfg_tx_path(rtwdev, path);
- rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan);
rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy);
}
- rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
+ rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy, chan);
}
static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
@@ -3494,7 +3498,7 @@ static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, const s16 *power,
- u32 *tssi_cw_rpt)
+ u32 *tssi_cw_rpt, const struct rtw89_chan *chan)
{
u32 tx_counter, tx_counter_tmp;
const int retry = 100;
@@ -3513,9 +3517,10 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
_tssi_trigger[path], tmp, path);
if (j == 0)
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true, chan);
else
- _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true);
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true,
+ chan);
tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
tx_counter_tmp -= tx_counter;
@@ -3546,14 +3551,14 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
"[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
k, path);
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
return false;
}
tssi_cw_rpt[j] =
rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], B_TSSI_CWRPT);
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
tx_counter_tmp -= tx_counter;
@@ -3567,14 +3572,13 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
0x78e4, 0x49c0, 0x0d18, 0x0d80};
static const s16 power_2g[4] = {48, 20, 4, 4};
static const s16 power_5g[4] = {48, 20, 4, 4};
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
u32 tssi_cw_rpt[RTW8852B_TSSI_PATH_NR] = {0};
u8 channel = chan->channel;
@@ -3635,7 +3639,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);
- ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt);
+ ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt, chan);
if (!ok)
goto out;
@@ -3755,18 +3759,19 @@ void rtw8852b_rck(struct rtw89_dev *rtwdev)
_rck(rtwdev, path);
}
-void rtw8852b_dack(struct rtw89_dev *rtwdev)
+void rtw8852b_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
_dac_cal(rtwdev, false);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
-void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
@@ -3774,15 +3779,16 @@ void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
-void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
@@ -3795,9 +3801,10 @@ void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
-void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
@@ -3806,7 +3813,7 @@ void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtwdev->dpk.is_dpk_enable = true;
rtwdev->dpk.is_dpk_reload_en = false;
- _dpk(rtwdev, phy_idx, false);
+ _dpk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
@@ -3817,9 +3824,11 @@ void rtw8852b_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
+void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, chanctx_idx);
u32 tx_en;
u8 i;
@@ -3829,34 +3838,34 @@ void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_e
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
_tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
_tssi_set_dac_gain_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
_tssi_set_tssi_slope(rtwdev, phy, i);
rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
_tmac_tx_pause(rtwdev, phy, true);
if (hwtx_en)
- _tssi_alimentk(rtwdev, phy, i);
+ _tssi_alimentk(rtwdev, phy, i, chan);
_tmac_tx_pause(rtwdev, phy, false);
rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
u8 channel = chan->channel;
u8 band;
@@ -3879,24 +3888,25 @@ void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RTW8852B_TSSI_PATH_NR; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, i, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
if (tssi_info->alignment_done[i][band])
- _tssi_alimentk_done(rtwdev, phy, i);
+ _tssi_alimentk_done(rtwdev, phy, i, chan);
else
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, bool enable)
+ enum rtw89_phy_idx phy, bool enable,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 channel = chan->channel;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
@@ -3904,7 +3914,7 @@ static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
if (enable) {
if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
- rtw8852b_tssi(rtwdev, phy, true);
+ rtw8852b_tssi(rtwdev, phy, true, chanctx_idx);
return;
}
@@ -3921,8 +3931,8 @@ static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_B);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_B, chan);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
@@ -3935,12 +3945,13 @@ static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
}
void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
if (scan_start)
- rtw8852b_tssi_default_txagc(rtwdev, phy_idx, true);
+ rtw8852b_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx);
else
- rtw8852b_tssi_default_txagc(rtwdev, phy_idx, false);
+ rtw8852b_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx);
}
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h
index f52832065600..c31ba446e6e0 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h
@@ -8,16 +8,22 @@
#include "core.h"
void rtw8852b_rck(struct rtw89_dev *rtwdev);
-void rtw8852b_dack(struct rtw89_dev *rtwdev);
-void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852b_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852b_dpk_init(struct rtw89_dev *rtwdev);
-void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852b_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en);
-void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx);
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852b_set_channel_rf(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx);
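A hedged usage sketch of the new prototypes: the core resolves the chan once per context and fans it out, as the rtw8852b_rfk_band_changed hunk above does (the wrapper function here is illustrative, not part of the patch):

	void example_band_changed(struct rtw89_dev *rtwdev,
				  enum rtw89_phy_idx phy_idx,
				  enum rtw89_chanctx_idx chanctx_idx)
	{
		const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);

		rtw8852b_tssi_scan(rtwdev, phy_idx, chan);
	}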
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
new file mode 100644
index 000000000000..7dfdcb5964e1
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "fw.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852bt.h"
+#include "rtw8852bt_rfk.h"
+#include "rtw8852b_common.h"
+
+#define RTW8852BT_FW_FORMAT_MAX 0
+#define RTW8852BT_FW_BASENAME "rtw89/rtw8852bt_fw"
+#define RTW8852BT_MODULE_FIRMWARE \
+ RTW8852BT_FW_BASENAME ".bin"
+
+static const struct rtw89_hfc_ch_cfg rtw8852bt_hfc_chcfg_pcie[] = {
+ {16, 742, grp_0}, /* ACH 0 */
+ {16, 742, grp_0}, /* ACH 1 */
+ {16, 742, grp_0}, /* ACH 2 */
+ {16, 742, grp_0}, /* ACH 3 */
+ {0, 0, grp_0}, /* ACH 4 */
+ {0, 0, grp_0}, /* ACH 5 */
+ {0, 0, grp_0}, /* ACH 6 */
+ {0, 0, grp_0}, /* ACH 7 */
+ {15, 743, grp_0}, /* B0MGQ */
+ {15, 743, grp_0}, /* B0HIQ */
+ {0, 0, grp_0}, /* B1MGQ */
+ {0, 0, grp_0}, /* B1HIQ */
+ {40, 0, 0} /* FWCMDQ */
+};
+
+static const struct rtw89_hfc_pub_cfg rtw8852bt_hfc_pubcfg_pcie = {
+ 958, /* Group 0 */
+ 0, /* Group 1 */
+ 958, /* Public Max */
+ 0 /* WP threshold */
+};
+
+static const struct rtw89_hfc_param_ini rtw8852bt_hfc_param_ini_pcie[] = {
+ [RTW89_QTA_SCC] = {rtw8852bt_hfc_chcfg_pcie, &rtw8852bt_hfc_pubcfg_pcie,
+ &rtw89_mac_size.hfc_preccfg_pcie, RTW89_HCIFC_POH},
+ [RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_mac_size.hfc_preccfg_pcie,
+ RTW89_HCIFC_POH},
+ [RTW89_QTA_INVALID] = {NULL},
+};
+
+static const struct rtw89_dle_mem rtw8852bt_dle_mem_pcie[] = {
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size23,
+ &rtw89_mac_size.ple_size9, &rtw89_mac_size.wde_qt23,
+ &rtw89_mac_size.wde_qt23, &rtw89_mac_size.ple_qt57,
+ &rtw89_mac_size.ple_qt59},
+ [RTW89_QTA_WOW] = {RTW89_QTA_WOW, &rtw89_mac_size.wde_size23,
+ &rtw89_mac_size.ple_size9, &rtw89_mac_size.wde_qt23,
+ &rtw89_mac_size.wde_qt23, &rtw89_mac_size.ple_qt57,
+ &rtw89_mac_size.ple_qt_52bt_wow},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size4,
+ &rtw89_mac_size.ple_size4, &rtw89_mac_size.wde_qt4,
+ &rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt13,
+ &rtw89_mac_size.ple_qt13},
+ [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
+ NULL},
+};
+
+static const u32 rtw8852bt_h2c_regs[RTW89_H2CREG_MAX] = {
+ R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1, R_AX_H2CREG_DATA2,
+ R_AX_H2CREG_DATA3
+};
+
+static const u32 rtw8852bt_c2h_regs[RTW89_C2HREG_MAX] = {
+ R_AX_C2HREG_DATA0, R_AX_C2HREG_DATA1, R_AX_C2HREG_DATA2,
+ R_AX_C2HREG_DATA3
+};
+
+static const u32 rtw8852bt_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = {
+ R_AX_C2HREG_DATA3 + 3, R_AX_C2HREG_DATA3 + 3,
+};
+
+static const struct rtw89_page_regs rtw8852bt_page_regs = {
+ .hci_fc_ctrl = R_AX_HCI_FC_CTRL,
+ .ch_page_ctrl = R_AX_CH_PAGE_CTRL,
+ .ach_page_ctrl = R_AX_ACH0_PAGE_CTRL,
+ .ach_page_info = R_AX_ACH0_PAGE_INFO,
+ .pub_page_info3 = R_AX_PUB_PAGE_INFO3,
+ .pub_page_ctrl1 = R_AX_PUB_PAGE_CTRL1,
+ .pub_page_ctrl2 = R_AX_PUB_PAGE_CTRL2,
+ .pub_page_info1 = R_AX_PUB_PAGE_INFO1,
+ .pub_page_info2 = R_AX_PUB_PAGE_INFO2,
+ .wp_page_ctrl1 = R_AX_WP_PAGE_CTRL1,
+ .wp_page_ctrl2 = R_AX_WP_PAGE_CTRL2,
+ .wp_page_info1 = R_AX_WP_PAGE_INFO1,
+};
+
+static const struct rtw89_reg_def rtw8852bt_dcfo_comp = {
+ R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK
+};
+
+static const struct rtw89_imr_info rtw8852bt_imr_info = {
+ .wdrls_imr_set = B_AX_WDRLS_IMR_SET,
+ .wsec_imr_reg = R_AX_SEC_DEBUG,
+ .wsec_imr_set = B_AX_IMR_ERROR,
+ .mpdu_tx_imr_set = 0,
+ .mpdu_rx_imr_set = 0,
+ .sta_sch_imr_set = B_AX_STA_SCHEDULER_IMR_SET,
+ .txpktctl_imr_b0_reg = R_AX_TXPKTCTL_ERR_IMR_ISR,
+ .txpktctl_imr_b0_clr = B_AX_TXPKTCTL_IMR_B0_CLR,
+ .txpktctl_imr_b0_set = B_AX_TXPKTCTL_IMR_B0_SET,
+ .txpktctl_imr_b1_reg = R_AX_TXPKTCTL_ERR_IMR_ISR_B1,
+ .txpktctl_imr_b1_clr = B_AX_TXPKTCTL_IMR_B1_CLR,
+ .txpktctl_imr_b1_set = B_AX_TXPKTCTL_IMR_B1_SET,
+ .wde_imr_clr = B_AX_WDE_IMR_CLR_V01,
+ .wde_imr_set = B_AX_WDE_IMR_SET_V01,
+ .ple_imr_clr = B_AX_PLE_IMR_CLR,
+ .ple_imr_set = B_AX_PLE_IMR_SET,
+ .host_disp_imr_clr = B_AX_HOST_DISP_IMR_CLR,
+ .host_disp_imr_set = B_AX_HOST_DISP_IMR_SET_V01,
+ .cpu_disp_imr_clr = B_AX_CPU_DISP_IMR_CLR,
+ .cpu_disp_imr_set = B_AX_CPU_DISP_IMR_SET,
+ .other_disp_imr_clr = B_AX_OTHER_DISP_IMR_CLR,
+ .other_disp_imr_set = 0,
+ .bbrpt_com_err_imr_reg = R_AX_BBRPT_COM_ERR_IMR_ISR,
+ .bbrpt_chinfo_err_imr_reg = R_AX_BBRPT_CHINFO_ERR_IMR_ISR,
+ .bbrpt_err_imr_set = 0,
+ .bbrpt_dfs_err_imr_reg = R_AX_BBRPT_DFS_ERR_IMR_ISR,
+ .ptcl_imr_clr = B_AX_PTCL_IMR_CLR_ALL,
+ .ptcl_imr_set = B_AX_PTCL_IMR_SET,
+ .cdma_imr_0_reg = R_AX_DLE_CTRL,
+ .cdma_imr_0_clr = B_AX_DLE_IMR_CLR,
+ .cdma_imr_0_set = B_AX_DLE_IMR_SET,
+ .cdma_imr_1_reg = 0,
+ .cdma_imr_1_clr = 0,
+ .cdma_imr_1_set = 0,
+ .phy_intf_imr_reg = R_AX_PHYINFO_ERR_IMR,
+ .phy_intf_imr_clr = B_AX_PHYINFO_IMR_EN_ALL,
+ .phy_intf_imr_set = B_AX_PHYINFO_IMR_SET,
+ .rmac_imr_reg = R_AX_RMAC_ERR_ISR,
+ .rmac_imr_clr = B_AX_RMAC_IMR_CLR,
+ .rmac_imr_set = B_AX_RMAC_IMR_SET,
+ .tmac_imr_reg = R_AX_TMAC_ERR_IMR_ISR,
+ .tmac_imr_clr = B_AX_TMAC_IMR_CLR,
+ .tmac_imr_set = B_AX_TMAC_IMR_SET,
+};
+
+static const struct rtw89_rrsr_cfgs rtw8852bt_rrsr_cfgs = {
+ .ref_rate = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_REF_RATE_SEL, 0},
+ .rsc = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_RSC_MASK, 2},
+};
+
+static const struct rtw89_rfkill_regs rtw8852bt_rfkill_regs = {
+ .pinmux = {R_AX_GPIO8_15_FUNC_SEL,
+ B_AX_PINMUX_GPIO9_FUNC_SEL_MASK,
+ 0xf},
+ .mode = {R_AX_GPIO_EXT_CTRL + 2,
+ (B_AX_GPIO_MOD_9 | B_AX_GPIO_IO_SEL_9) >> 16,
+ 0x0},
+};
+
+static const struct rtw89_dig_regs rtw8852bt_dig_regs = {
+ .seg0_pd_reg = R_SEG0R_PD_V1,
+ .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
+ .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1,
+ .bmode_pd_reg = R_BMODE_PDTH_EN_V1,
+ .bmode_cca_rssi_limit_en = B_BMODE_PDTH_LIMIT_EN_MSK_V1,
+ .bmode_pd_lower_bound_reg = R_BMODE_PDTH_V1,
+ .bmode_rssi_nocca_low_th_mask = B_BMODE_PDTH_LOWER_BOUND_MSK_V1,
+ .p0_lna_init = {R_PATH0_LNA_INIT_V1, B_PATH0_LNA_INIT_IDX_MSK},
+ .p1_lna_init = {R_PATH1_LNA_INIT_V1, B_PATH1_LNA_INIT_IDX_MSK},
+ .p0_tia_init = {R_PATH0_TIA_INIT_V1, B_PATH0_TIA_INIT_IDX_MSK_V1},
+ .p1_tia_init = {R_PATH1_TIA_INIT_V1, B_PATH1_TIA_INIT_IDX_MSK_V1},
+ .p0_rxb_init = {R_PATH0_RXB_INIT_V1, B_PATH0_RXB_INIT_IDX_MSK_V1},
+ .p1_rxb_init = {R_PATH1_RXB_INIT_V1, B_PATH1_RXB_INIT_IDX_MSK_V1},
+ .p0_p20_pagcugc_en = {R_PATH0_P20_FOLLOW_BY_PAGCUGC_V2,
+ B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p0_s20_pagcugc_en = {R_PATH0_S20_FOLLOW_BY_PAGCUGC_V2,
+ B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_p20_pagcugc_en = {R_PATH1_P20_FOLLOW_BY_PAGCUGC_V2,
+ B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_s20_pagcugc_en = {R_PATH1_S20_FOLLOW_BY_PAGCUGC_V2,
+ B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+};
+
+static const struct rtw89_edcca_regs rtw8852bt_edcca_regs = {
+ .edcca_level = R_SEG0R_EDCCA_LVL_V1,
+ .edcca_mask = B_EDCCA_LVL_MSK0,
+ .edcca_p_mask = B_EDCCA_LVL_MSK1,
+ .ppdu_level = R_SEG0R_EDCCA_LVL_V1,
+ .ppdu_mask = B_EDCCA_LVL_MSK3,
+ .rpt_a = R_EDCCA_RPT_A,
+ .rpt_b = R_EDCCA_RPT_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .tx_collision_t2r_st = R_TX_COLLISION_T2R_ST,
+ .tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M,
+};
+
+static const struct rtw89_btc_rf_trx_para rtw89_btc_8852bt_rf_ul[] = {
+ {255, 0, 0, 7}, /* 0 -> original */
+ {255, 2, 0, 7}, /* 1 -> for BT-connected ACI issue && BTG co-rx */
+	{255, 0, 0, 7}, /* 2 -> reserved for shared-antenna */
+	{255, 0, 0, 7}, /* 3 -> reserved for shared-antenna */
+	{255, 0, 0, 7}, /* 4 -> reserved for shared-antenna */
+	{255, 1, 0, 7}, /* the IDs below are for non-shared-antenna free-run */
+ {6, 1, 0, 7},
+ {13, 1, 0, 7},
+ {13, 1, 0, 7}
+};
+
+static const struct rtw89_btc_rf_trx_para rtw89_btc_8852bt_rf_dl[] = {
+ {255, 0, 0, 7}, /* 0 -> original */
+ {255, 2, 0, 7}, /* 1 -> reserved for shared-antenna */
+	{255, 0, 0, 7}, /* 2 -> reserved for shared-antenna */
+	{255, 0, 0, 7}, /* 3 -> reserved for shared-antenna */
+	{255, 0, 0, 7}, /* 4 -> reserved for shared-antenna */
+	{255, 1, 0, 7}, /* the IDs below are for non-shared-antenna free-run */
+ {255, 1, 0, 7},
+ {255, 1, 0, 7},
+ {255, 1, 0, 7}
+};
+
+static const struct rtw89_btc_fbtc_mreg rtw89_btc_8852bt_mon_reg[] = {
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda24),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda28),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda2c),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda30),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda4c),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda10),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda20),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda34),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xcef4),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0x8424),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xd200),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xd220),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x980),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x4aa4),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x4778),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x476c),
+};
+
+static const u8 rtw89_btc_8852bt_wl_rssi_thres[BTC_WL_RSSI_THMAX] = {70, 60, 50, 40};
+static const u8 rtw89_btc_8852bt_bt_rssi_thres[BTC_BT_RSSI_THMAX] = {50, 40, 30, 20};
+
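+/* Power-on sequence for the 8852BT WL core: release the AON regulator
+ * and the auto-suspend FSMs, poll until system power is ready, switch
+ * the on-MAC power FSM on, then run the XTAL SI writes that hand the
+ * shared analog die over to WiFi before enabling DMAC/CMAC functions. */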
+static int rtw8852bt_pwr_on_func(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ u32 ret;
+
+ rtw89_write32_set(rtwdev, R_AX_LDO_AON_CTRL0, B_AX_PD_REGU_L);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_AFSM_WLSUS_EN |
+ B_AX_AFSM_PCIE_SUS_EN);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_DIS_WLBT_PDNSUSEN_SOPC);
+ rtw89_write32_set(rtwdev, R_AX_WLLPS_CTRL, B_AX_DIS_WLBT_LPSEN_LOPC);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APDM_HPDN);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_OCP_L1_MASK, 7);
+
+ ret = read_poll_timeout(rtw89_read32, val32, val32 & B_AX_RDY_SYSPWR,
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFN_ONMAC);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_APFN_ONMAC),
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_CALIB_EN_V1);
+ rtw89_write32_set(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_PTA_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL,
+ XTAL_SI_GND_SHDN_WL, XTAL_SI_GND_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL,
+ XTAL_SI_SHDN_WL, XTAL_SI_SHDN_WL);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_OFF_WEI,
+ XTAL_SI_OFF_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_OFF_EI,
+ XTAL_SI_OFF_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_RFC2RF);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_PON_WEI,
+ XTAL_SI_PON_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_PON_EI,
+ XTAL_SI_PON_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SRAM2RFC);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_SRAM_CTRL, 0, XTAL_SI_SRAM_DIS);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_2, 0, XTAL_SI_LDO_LPS);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_4, 0, XTAL_SI_LPS_CAP);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+ rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15);
+
+ fsleep(1000);
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);
+ rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+
+ if (!rtwdev->efuse.valid || rtwdev->efuse.power_k_valid)
+ goto func_en;
+
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_VOL_L1_MASK, 0x9);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_VREFPFM_L_MASK, 0xA);
+
+func_en:
+ rtw89_write32_set(rtwdev, R_AX_DMAC_FUNC_EN,
+ B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MPDU_PROC_EN |
+ B_AX_WD_RLS_EN | B_AX_DLE_WDE_EN | B_AX_TXPKT_CTRL_EN |
+ B_AX_STA_SCH_EN | B_AX_DLE_PLE_EN | B_AX_PKT_BUF_EN |
+ B_AX_DMAC_TBL_EN | B_AX_PKT_IN_EN | B_AX_DLE_CPUIO_EN |
+ B_AX_DISPATCHER_EN | B_AX_BBRPT_EN | B_AX_MAC_SEC_EN |
+ B_AX_DMACREG_GCKEN);
+ rtw89_write32_set(rtwdev, R_AX_CMAC_FUNC_EN,
+ B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN |
+ B_AX_FORCE_CMACREG_GCKEN | B_AX_PHYINTF_EN | B_AX_CMAC_DMA_EN |
+ B_AX_PTCLTOP_EN | B_AX_SCHEDULER_EN | B_AX_TMAC_EN |
+ B_AX_RMAC_EN);
+
+ rtw89_write32_mask(rtwdev, R_AX_EECS_EESK_FUNC_SEL,
+ B_AX_PINMUX_EESK_FUNC_SEL_MASK, 0x1);
+
+ return 0;
+}
+
+static int rtw8852bt_pwr_off_func(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ u32 ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_RFC2RF,
+ XTAL_SI_RFC2RF);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_OFF_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_OFF_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0, XTAL_SI_RF00);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0, XTAL_SI_RF10);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_SRAM2RFC,
+ XTAL_SI_SRAM2RFC);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_PON_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_PON_WEI);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BB_GLB_RSTN | B_AX_FEN_BBRSTB);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_PTA_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_GND_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_OFFMAC);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_APFM_OFFMAC),
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, SW_LPS_OPTION);
+ rtw89_write32_set(rtwdev, R_AX_SYS_SWR_CTRL1, B_AX_SYM_CTRL_SPS_PWMFREQ);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_REG_ZCDC_H_MASK, 0x3);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+
+ return 0;
+}
+
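+/* Gate the baseband in/out of reset: on enable, re-arm the HW SI
+ * triggers, release the async reset, and un-gate CCA (2 GHz only) and
+ * PD detection; on disable, gate CCA/PD first, then assert the reset. */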
+static void rtw8852bt_bb_reset_en(struct rtw89_dev *rtwdev, enum rtw89_band band,
+ enum rtw89_phy_idx phy_idx, bool en)
+{
+ if (en) {
+ rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS,
+ B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS,
+ B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
+ if (band == RTW89_BAND_2G)
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1);
+ rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS,
+ B_S0_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS,
+ B_S1_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
+ fsleep(1);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, phy_idx);
+ }
+}
+
+static void rtw8852bt_bb_reset(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB,
+ B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI, 0x1);
+ rtw89_phy_write32_set(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB,
+ B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI, 0x1);
+ rtw89_phy_write32_set(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN);
+ rtw8852bx_bb_reset_all(rtwdev, phy_idx);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB,
+			       B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI, 0x3);
+ rtw89_phy_write32_clr(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB,
+			       B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI, 0x3);
+ rtw89_phy_write32_clr(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN);
+}
+
+static void rtw8852bt_set_channel(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bx_set_channel_mac(rtwdev, chan, mac_idx);
+ rtw8852bx_set_channel_bb(rtwdev, chan, phy_idx);
+ rtw8852bt_set_channel_rf(rtwdev, chan, phy_idx);
+}
+
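+/* Per-path TSSI continuous-mode toggle: en clears the path's
+ * TSSI_TRK_EN bit, !en sets it. */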
+static void rtw8852bt_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_rf_path path)
+{
+ static const u32 tssi_trk[2] = {R_P0_TSSI_TRK, R_P1_TSSI_TRK};
+
+ if (en)
+ rtw89_phy_write32_mask(rtwdev, tssi_trk[path], B_P0_TSSI_TRK_EN, 0x0);
+ else
+ rtw89_phy_write32_mask(rtwdev, tssi_trk[path], B_P0_TSSI_TRK_EN, 0x1);
+}
+
+static void rtw8852bt_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en,
+ u8 phy_idx, const struct rtw89_chan *chan)
+{
+ if (!rtwdev->dbcc_en) {
+ rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ rtw8852bt_tssi_scan(rtwdev, phy_idx, chan);
+ } else {
+ if (phy_idx == RTW89_PHY_0)
+ rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ else
+ rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ }
+}
+
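+/* ADC FIFO reset control: 0x0 releases the FIFOs, 0xf holds all four
+ * lanes in reset. */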
+static void rtw8852bt_adc_en(struct rtw89_dev *rtwdev, bool en)
+{
+ if (en)
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0xf);
+}
+
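+/* Bracket a channel switch: on enter, stop scheduled TX and PPDU
+ * status reports, pause TSSI, disable the ADC and hold the BB in
+ * reset; on leave, undo the same steps in reverse. */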
+static void rtw8852bt_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (enter) {
+ rtw89_chip_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ rtw8852bt_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0, chan);
+ rtw8852bt_adc_en(rtwdev, false);
+ fsleep(40);
+ rtw8852bt_bb_reset_en(rtwdev, chan->band_type, phy_idx, false);
+ } else {
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ rtw8852bt_adc_en(rtwdev, true);
+ rtw8852bt_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0, chan);
+ rtw8852bt_bb_reset_en(rtwdev, chan->band_type, phy_idx, true);
+ rtw89_chip_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
+ }
+}
+
+static void rtw8852bt_rfk_init(struct rtw89_dev *rtwdev)
+{
+ rtwdev->is_tssi_mode[RF_PATH_A] = false;
+ rtwdev->is_tssi_mode[RF_PATH_B] = false;
+
+ rtw8852bt_dpk_init(rtwdev);
+ rtw8852bt_rck(rtwdev);
+ rtw8852bt_dack(rtwdev, RTW89_CHANCTX_0);
+ rtw8852bt_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
+}
+
+static void rtw8852bt_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+ enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
+
+ rtw8852bt_rx_dck(rtwdev, phy_idx, chanctx_idx);
+ rtw8852bt_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8852bt_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw8852bt_dpk(rtwdev, phy_idx, chanctx_idx);
+}
+
+static void rtw8852bt_rfk_band_changed(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
+{
+ rtw8852bt_tssi_scan(rtwdev, phy_idx, chan);
+}
+
+static void rtw8852bt_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool start)
+{
+ rtw8852bt_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx);
+}
+
+static void rtw8852bt_rfk_track(struct rtw89_dev *rtwdev)
+{
+ rtw8852bt_dpk_track(rtwdev);
+}
+
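+/* Fill the v7 coex module info from efuse/hal data.  The antenna count
+ * follows rfe_type parity: two antennas means shared-antenna with BT on
+ * the BTG path, otherwise BT gets a dedicated antenna. */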
+static void rtw8852bt_btc_set_rfe(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
+
+ if (ver->fcxinit == 7) {
+ md->md_v7.rfe_type = rtwdev->efuse.rfe_type;
+ md->md_v7.kt_ver = rtwdev->hal.cv;
+ md->md_v7.kt_ver_adie = rtwdev->hal.acv;
+ md->md_v7.bt_solo = 0;
+ md->md_v7.bt_pos = BTC_BT_BTG;
+ md->md_v7.switch_type = BTC_SWITCH_INTERNAL;
+ md->md_v7.wa_type = 0;
+
+ md->md_v7.ant.type = BTC_ANT_SHARED;
+ md->md_v7.ant.num = 2;
+ md->md_v7.ant.isolation = 10;
+ md->md_v7.ant.diversity = 0;
+ /* WL 1-stream+1-Ant is located at 0:s0(path-A) or 1:s1(path-B) */
+ md->md_v7.ant.single_pos = RF_PATH_A;
+ md->md_v7.ant.btg_pos = RF_PATH_B;
+
+ if (md->md_v7.rfe_type == 0) {
+ rtwdev->btc.dm.error.map.rfe_type0 = true;
+ return;
+ }
+
+ md->md_v7.ant.num = (md->md_v7.rfe_type % 2) ? 2 : 3;
+ md->md_v7.ant.stream_cnt = 2;
+ md->md_v7.wa_type |= BTC_WA_INIT_SCAN;
+
+ if (md->md_v7.ant.num == 2) {
+ md->md_v7.ant.type = BTC_ANT_SHARED;
+ md->md_v7.bt_pos = BTC_BT_BTG;
+ md->md_v7.wa_type |= BTC_WA_HFP_LAG;
+ } else {
+ md->md_v7.ant.type = BTC_ANT_DEDICATED;
+ md->md_v7.bt_pos = BTC_BT_ALONE;
+ }
+	}
+}
+
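+/*
+ * txpwr_val packs two 16-bit fields: bits 15:0 force an all-time TX
+ * power (0xffff restores rate-based power), bits 31:16 force the TX
+ * AGC applied under GNT_BT (0xffff disables that override). For
+ * example, 0xffff0000 | 30 would force only the all-time power to 30
+ * while leaving the GNT_BT override off.
+ */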
+static void
+rtw8852bt_btc_set_wl_txpwr_ctrl(struct rtw89_dev *rtwdev, u32 txpwr_val)
+{
+ u16 ctrl_all_time = u32_get_bits(txpwr_val, GENMASK(15, 0));
+ u16 ctrl_gnt_bt = u32_get_bits(txpwr_val, GENMASK(31, 16));
+
+ switch (ctrl_all_time) {
+ case 0xffff:
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_RATE_CTRL,
+ B_AX_FORCE_PWR_BY_RATE_EN, 0x0);
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_RATE_CTRL,
+ B_AX_FORCE_PWR_BY_RATE_VALUE_MASK, 0x0);
+ break;
+ default:
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_RATE_CTRL,
+ B_AX_FORCE_PWR_BY_RATE_VALUE_MASK,
+ ctrl_all_time);
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_RATE_CTRL,
+ B_AX_FORCE_PWR_BY_RATE_EN, 0x1);
+ break;
+ }
+
+ switch (ctrl_gnt_bt) {
+ case 0xffff:
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_COEXT_CTRL,
+ B_AX_TXAGC_BT_EN, 0x0);
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_COEXT_CTRL,
+ B_AX_TXAGC_BT_MASK, 0x0);
+ break;
+ default:
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_COEXT_CTRL,
+ B_AX_TXAGC_BT_MASK, ctrl_gnt_bt);
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_COEXT_CTRL,
+ B_AX_TXAGC_BT_EN, 0x1);
+ break;
+ }
+}
+
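+/* rtw8852bx_* ops are shared across the 8852B family; the rest are 8852BT-specific. */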
+static const struct rtw89_chip_ops rtw8852bt_chip_ops = {
+ .enable_bb_rf = rtw8852bx_mac_enable_bb_rf,
+ .disable_bb_rf = rtw8852bx_mac_disable_bb_rf,
+ .bb_preinit = NULL,
+ .bb_postinit = NULL,
+ .bb_reset = rtw8852bt_bb_reset,
+ .bb_sethw = rtw8852bx_bb_sethw,
+ .read_rf = rtw89_phy_read_rf_v1,
+ .write_rf = rtw89_phy_write_rf_v1,
+ .set_channel = rtw8852bt_set_channel,
+ .set_channel_help = rtw8852bt_set_channel_help,
+ .read_efuse = rtw8852bx_read_efuse,
+ .read_phycap = rtw8852bx_read_phycap,
+ .fem_setup = NULL,
+ .rfe_gpio = NULL,
+ .rfk_hw_init = NULL,
+ .rfk_init = rtw8852bt_rfk_init,
+ .rfk_init_late = NULL,
+ .rfk_channel = rtw8852bt_rfk_channel,
+ .rfk_band_changed = rtw8852bt_rfk_band_changed,
+ .rfk_scan = rtw8852bt_rfk_scan,
+ .rfk_track = rtw8852bt_rfk_track,
+ .power_trim = rtw8852bx_power_trim,
+ .set_txpwr = rtw8852bx_set_txpwr,
+ .set_txpwr_ctrl = rtw8852bx_set_txpwr_ctrl,
+ .init_txpwr_unit = rtw8852bx_init_txpwr_unit,
+ .get_thermal = rtw8852bx_get_thermal,
+ .ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
+ .query_ppdu = rtw8852bx_query_ppdu,
+ .convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi,
+ .ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx,
+ .cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
+ .set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
+ .pwr_on_func = rtw8852bt_pwr_on_func,
+ .pwr_off_func = rtw8852bt_pwr_off_func,
+ .query_rxdesc = rtw89_core_query_rxdesc,
+ .fill_txdesc = rtw89_core_fill_txdesc,
+ .fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
+ .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
+ .mac_cfg_gnt = rtw89_mac_cfg_gnt,
+ .stop_sch_tx = rtw89_mac_stop_sch_tx,
+ .resume_sch_tx = rtw89_mac_resume_sch_tx,
+ .h2c_dctl_sec_cam = NULL,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,
+
+ .btc_set_rfe = rtw8852bt_btc_set_rfe,
+ .btc_init_cfg = rtw8852bx_btc_init_cfg,
+ .btc_set_wl_pri = rtw8852bx_btc_set_wl_pri,
+ .btc_set_wl_txpwr_ctrl = rtw8852bt_btc_set_wl_txpwr_ctrl,
+ .btc_get_bt_rssi = rtw8852bx_btc_get_bt_rssi,
+ .btc_update_bt_cnt = rtw8852bx_btc_update_bt_cnt,
+ .btc_wl_s1_standby = rtw8852bx_btc_wl_s1_standby,
+ .btc_set_wl_rx_gain = rtw8852bx_btc_set_wl_rx_gain,
+ .btc_set_policy = rtw89_btc_set_policy_v1,
+};
+
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support rtw_wowlan_stub_8852bt = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .n_patterns = RTW89_MAX_PATTERN_NUM,
+ .pattern_max_len = RTW89_MAX_PATTERN_SIZE,
+ .pattern_min_len = 1,
+};
+#endif
+
+const struct rtw89_chip_info rtw8852bt_chip_info = {
+ .chip_id = RTL8852BT,
+ .chip_gen = RTW89_CHIP_AX,
+ .ops = &rtw8852bt_chip_ops,
+ .mac_def = &rtw89_mac_gen_ax,
+ .phy_def = &rtw89_phy_gen_ax,
+ .fw_basename = RTW8852BT_FW_BASENAME,
+ .fw_format_max = RTW8852BT_FW_FORMAT_MAX,
+ .try_ce_fw = true,
+ .bbmcu_nr = 0,
+ .needed_fw_elms = RTW89_AX_GEN_DEF_NEEDED_FW_ELEMENTS_NO_6GHZ,
+ .fifo_size = 458752,
+ .small_fifo_size = true,
+ .dle_scc_rsvd_size = 98304,
+ .max_amsdu_limit = 5000,
+ .dis_2g_40m_ul_ofdma = true,
+ .rsvd_ple_ofst = 0x6f800,
+ .hfc_param_ini = rtw8852bt_hfc_param_ini_pcie,
+ .dle_mem = rtw8852bt_dle_mem_pcie,
+ .wde_qempty_acq_grpnum = 4,
+ .wde_qempty_mgq_grpsel = 4,
+ .rf_base_addr = {0xe000, 0xf000},
+ .pwr_on_seq = NULL,
+ .pwr_off_seq = NULL,
+ .bb_table = NULL,
+ .bb_gain_table = NULL,
+ .rf_table = {},
+ .nctl_table = NULL,
+ .nctl_post_table = NULL,
+ .dflt_parms = NULL,
+ .rfe_parms_conf = NULL,
+ .txpwr_factor_rf = 2,
+ .txpwr_factor_mac = 1,
+ .dig_table = NULL,
+ .dig_regs = &rtw8852bt_dig_regs,
+ .tssi_dbw_table = NULL,
+ .support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
+ .support_chanctx_num = 1,
+ .support_rnr = false,
+ .support_bands = BIT(NL80211_BAND_2GHZ) |
+ BIT(NL80211_BAND_5GHZ),
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+ .support_unii4 = true,
+ .ul_tb_waveform_ctrl = true,
+ .ul_tb_pwr_diff = false,
+ .hw_sec_hdr = false,
+ .hw_mgmt_tx_encrypt = false,
+ .rf_path_num = 2,
+ .tx_nss = 2,
+ .rx_nss = 2,
+ .acam_num = 128,
+ .bcam_num = 10,
+ .scam_num = 128,
+ .bacam_num = 2,
+ .bacam_dynamic_num = 4,
+ .bacam_ver = RTW89_BACAM_V0,
+ .ppdu_max_usr = 4,
+ .sec_ctrl_efuse_size = 4,
+ .physical_efuse_size = 1216,
+ .logical_efuse_size = 2048,
+ .limit_efuse_size = 1280,
+ .dav_phy_efuse_size = 96,
+ .dav_log_efuse_size = 16,
+ .efuse_blocks = NULL,
+ .phycap_addr = 0x580,
+ .phycap_size = 128,
+ .para_ver = 0,
+ .wlcx_desired = 0x070e0000,
+ .btcx_desired = 0x7,
+ .scbd = 0x1,
+ .mailbox = 0x1,
+
+ .afh_guard_ch = 6,
+ .wl_rssi_thres = rtw89_btc_8852bt_wl_rssi_thres,
+ .bt_rssi_thres = rtw89_btc_8852bt_bt_rssi_thres,
+ .rssi_tol = 2,
+ .mon_reg_num = ARRAY_SIZE(rtw89_btc_8852bt_mon_reg),
+ .mon_reg = rtw89_btc_8852bt_mon_reg,
+ .rf_para_ulink_num = ARRAY_SIZE(rtw89_btc_8852bt_rf_ul),
+ .rf_para_ulink = rtw89_btc_8852bt_rf_ul,
+ .rf_para_dlink_num = ARRAY_SIZE(rtw89_btc_8852bt_rf_dl),
+ .rf_para_dlink = rtw89_btc_8852bt_rf_dl,
+ .ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) |
+ BIT(RTW89_PS_MODE_CLK_GATED) |
+ BIT(RTW89_PS_MODE_PWR_GATED),
+ .low_power_hci_modes = 0,
+ .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD,
+ .hci_func_en_addr = R_AX_HCI_FUNC_EN,
+ .h2c_desc_size = sizeof(struct rtw89_txwd_body),
+ .txwd_body_size = sizeof(struct rtw89_txwd_body),
+ .txwd_info_size = sizeof(struct rtw89_txwd_info),
+ .h2c_ctrl_reg = R_AX_H2CREG_CTRL,
+ .h2c_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_H2C_DEQ_CNT_MASK >> 8},
+ .h2c_regs = rtw8852bt_h2c_regs,
+ .c2h_ctrl_reg = R_AX_C2HREG_CTRL,
+ .c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
+ .c2h_regs = rtw8852bt_c2h_regs,
+ .page_regs = &rtw8852bt_page_regs,
+ .wow_reason_reg = rtw8852bt_wow_wakeup_regs,
+ .cfo_src_fd = true,
+ .cfo_hw_comp = true,
+ .dcfo_comp = &rtw8852bt_dcfo_comp,
+ .dcfo_comp_sft = 10,
+ .imr_info = &rtw8852bt_imr_info,
+ .imr_dmac_table = NULL,
+ .imr_cmac_table = NULL,
+ .rrsr_cfgs = &rtw8852bt_rrsr_cfgs,
+ .bss_clr_vld = {R_BSS_CLR_MAP_V1, B_BSS_CLR_MAP_VLD0},
+ .bss_clr_map_reg = R_BSS_CLR_MAP_V1,
+ .rfkill_init = &rtw8852bt_rfkill_regs,
+ .rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9},
+ .dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
+ BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
+ BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
+ .edcca_regs = &rtw8852bt_edcca_regs,
+#ifdef CONFIG_PM
+ .wowlan_stub = &rtw_wowlan_stub_8852bt,
+#endif
+ .xtal_info = NULL,
+};
+EXPORT_SYMBOL(rtw8852bt_chip_info);
+
+MODULE_FIRMWARE(RTW8852BT_MODULE_FIRMWARE);
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852BT driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt.h
index 6177f36ad667..b76b36aaf025 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.h
@@ -10,4 +10,6 @@
#define RF_PATH_NUM_8852BT 2
#define BB_PATH_NUM_8852BT 2
+extern const struct rtw89_chip_info rtw8852bt_chip_info;
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
index fa0e49d58112..336a83e1d46b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
@@ -1525,9 +1525,10 @@ static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u
lok_result, txk_result, rxk_result);
}
-static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path)
+static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 get_empty_table = false;
u32 reg_rf18;
@@ -1755,12 +1756,13 @@ static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR];
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
@@ -1770,7 +1772,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8852BT_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, backup_bb_val);
_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
@@ -1785,20 +1787,21 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u8 kpath = _kpath(rtwdev, phy_idx);
switch (kpath) {
case RF_A:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
break;
case RF_B:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
case RF_AB:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
default:
break;
@@ -1824,7 +1827,7 @@ static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool o
BIT(24), val);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
- kidx, dpk->is_dpk_enable & off_reverse ? "enable" : "disable");
+ kidx, str_enable_disable(dpk->is_dpk_enable & off_reverse));
}
static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
@@ -1863,7 +1866,7 @@ static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
id == 0x14 ? "PWR_CAL" :
id == 0x15 ? "DPK_RXAGC" :
id == 0x16 ? "KIP_PRESET" :
- id == 0x17 ? "KIP_RESOTRE" :
+ id == 0x17 ? "KIP_RESTORE" :
"DPK_TXAGC", dpk_cmd);
}
@@ -1879,9 +1882,9 @@ static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 kidx = dpk->cur_idx[path];
@@ -2277,9 +2280,9 @@ static bool _dpk_pas_read(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
- bool loss_only)
+ bool loss_only, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 goout = 0, agc_cnt = 0, limited_rxbb = 0, gl_cnt = 0;
u8 tmp_txagc, tmp_rxbb, tmp_gl_idx = 0;
@@ -2504,9 +2507,9 @@ static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 idx, cur_band, cur_ch;
bool is_reload = false;
@@ -2549,7 +2552,8 @@ void _drf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool i
}
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 gain)
+ enum rtw89_rf_path path, u8 gain,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 txagc = 0x38, kidx = dpk->cur_idx[path];
@@ -2569,7 +2573,7 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
_dpk_kip_set_rxagc(rtwdev, phy, path);
_dpk_table_select(rtwdev, path, kidx, gain);
- txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
+ txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx);
_rfk_get_thermal(rtwdev, kidx, path);
@@ -2601,7 +2605,8 @@ _error:
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u32 backup_kip_val[BACKUP_KIP_REGS_NR];
@@ -2611,7 +2616,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev,
u8 path;
for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
- reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path, chanctx_idx);
if (!reloaded[path] && dpk->bp[path][0].ch != 0)
dpk->cur_idx[path] = !dpk->cur_idx[path];
else
@@ -2623,7 +2628,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev,
for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
if (rtwdev->is_tssi_mode[path])
_dpk_tssi_pause(rtwdev, path, true);
}
@@ -2631,7 +2636,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev,
_rfk_bb_afe_setting(rtwdev, phy, path, kpath);
for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++)
- _dpk_main(rtwdev, phy, path, 1);
+ _dpk_main(rtwdev, phy, path, 1, chanctx_idx);
_rfk_bb_afe_restore(rtwdev, phy, path, kpath);
@@ -2646,9 +2651,10 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev,
}
}
-static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_fem_info *fem = &rtwdev->fem;
if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
@@ -2817,9 +2823,8 @@ static void _tssi_dpk_off(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
if (band == RTW89_BAND_2G)
@@ -2829,9 +2834,8 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8852bt_tssi_sys_defs_tbl);
@@ -2878,7 +2882,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define RTW8852BT_TSSI_GET_VAL(ptr, idx) \
({ \
@@ -2893,7 +2897,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
})
struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -3047,9 +3050,8 @@ static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx
}
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A)
@@ -3063,9 +3065,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool all)
+ enum rtw89_rf_path path, bool all,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
const struct rtw89_rfk_tbl *tbl = NULL;
u8 ch = chan->channel;
@@ -3310,10 +3312,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
s8 de_1st;
@@ -3346,10 +3347,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
s8 tde_1st;
@@ -3383,10 +3383,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
return val;
}
-static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
@@ -3399,7 +3399,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3415,8 +3415,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
_TSSI_DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3463,10 +3463,10 @@ static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_p
}
static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, enum rtw89_rf_path path)
+ enum rtw89_phy_idx phy, enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 channel = chan->channel;
u8 band;
@@ -3500,7 +3500,7 @@ static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
- u8 enable)
+ u8 enable, const struct rtw89_chan *chan)
{
enum rtw89_rf_path_bit rx_path;
@@ -3516,11 +3516,11 @@ static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
if (enable) {
rtw8852bx_bb_set_plcp_tx(rtwdev);
rtw8852bx_bb_cfg_tx_path(rtwdev, path);
- rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan);
rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy);
}
- rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
+ rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy, chan);
}
static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
@@ -3574,7 +3574,7 @@ static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, const s16 *power,
- u32 *tssi_cw_rpt)
+ u32 *tssi_cw_rpt, const struct rtw89_chan *chan)
{
u32 tx_counter, tx_counter_tmp;
const int retry = 100;
@@ -3593,9 +3593,11 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
_tssi_trigger[path], tmp, path);
if (j == 0)
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true,
+ chan);
else
- _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true);
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true,
+ chan);
tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
tx_counter_tmp -= tx_counter;
@@ -3626,7 +3628,7 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
"[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
k, path);
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
return false;
}
@@ -3634,7 +3636,7 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
B_TSSI_CWRPT);
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
tx_counter_tmp -= tx_counter;
@@ -3648,14 +3650,13 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
0x78e4, 0x49c0, 0x0d18, 0x0d80};
static const s16 power_2g[4] = {48, 20, 4, -8};
static const s16 power_5g[4] = {48, 20, 4, 4};
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
u32 tssi_cw_rpt[RTW8852BT_TSSI_PATH_NR] = {};
u8 channel = chan->channel;
@@ -3701,7 +3702,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);
- ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt);
+ ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt, chan);
if (!ok)
goto out;
@@ -3833,18 +3834,19 @@ void rtw8852bt_rck(struct rtw89_dev *rtwdev)
_rck(rtwdev, path);
}
-void rtw8852bt_dack(struct rtw89_dev *rtwdev)
+void rtw8852bt_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
_dac_cal(rtwdev, false);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
-void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
@@ -3852,15 +3854,16 @@ void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
-void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
@@ -3873,15 +3876,16 @@ void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
-void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** DPK Start (Ver: 0x%x) ******\n", RTW8852BT_DPK_VER);
- if (_dpk_bypass_check(rtwdev, phy_idx))
+ if (_dpk_bypass_check(rtwdev, phy_idx, chanctx_idx))
_dpk_force_bypass(rtwdev, phy_idx);
else
- _dpk_cal_select(rtwdev, phy_idx, RF_AB);
+ _dpk_cal_select(rtwdev, phy_idx, RF_AB, chanctx_idx);
}
void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev)
@@ -3889,10 +3893,12 @@ void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
+void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
static const u32 reg[2] = {R_DPD_CH0A, R_DPD_CH0B};
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, chanctx_idx);
u32 reg_backup[2] = {};
u32 tx_en;
u8 i;
@@ -3905,36 +3911,36 @@ void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
_tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
_tssi_set_dac_gain_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
_tssi_set_tssi_slope(rtwdev, phy, i);
rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
_tmac_tx_pause(rtwdev, phy, true);
if (hwtx_en)
- _tssi_alimentk(rtwdev, phy, i);
+ _tssi_alimentk(rtwdev, phy, i, chan);
_tmac_tx_pause(rtwdev, phy, false);
rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
_tssi_reload_bb_registers(rtwdev, phy, reg, reg_backup, 2);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
u8 channel = chan->channel;
u8 band;
@@ -3957,24 +3963,25 @@ void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RTW8852BT_TSSI_PATH_NR; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, i, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
if (tssi_info->alignment_done[i][band])
- _tssi_alimentk_done(rtwdev, phy, i);
+ _tssi_alimentk_done(rtwdev, phy, i, chan);
else
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, bool enable)
+ enum rtw89_phy_idx phy, bool enable,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 channel = chan->channel;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
@@ -3996,8 +4003,8 @@ static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_B);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_B, chan);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
@@ -4010,10 +4017,237 @@ static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
}
void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
if (scan_start)
- rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, true);
+ rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx);
else
- rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, false);
+ rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx);
+}
+
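+/*
+ * RF register 0x18 has two copies; @dav selects which one (RR_CFGCH
+ * or RR_CFGCH_V1) receives the new bandwidth field.
+ */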
+static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ enum rtw89_bandwidth bw, bool dav)
+{
+ u32 rf_reg18;
+ u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);
+
+ rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
+ if (rf_reg18 == INV_RF_DATA) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]Invalid RF_0x18 for Path-%d\n", path);
+ return;
+ }
+ rf_reg18 &= ~RR_CFGCH_BW;
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
+ break;
+ default:
+		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]Fail to set BW\n");
+ }
+
+ rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
+ RR_CFGCH_BW2) & RFREG_MASK;
+ rf_reg18 |= RR_CFGCH_BW2;
+ rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set %x at path%d, %x =0x%x\n",
+ bw, path, reg18_addr,
+ rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
+}
+
+static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_bandwidth bw)
+{
+ _bw_setting(rtwdev, RF_PATH_A, bw, true);
+ _bw_setting(rtwdev, RF_PATH_B, bw, true);
+ _bw_setting(rtwdev, RF_PATH_A, bw, false);
+ _bw_setting(rtwdev, RF_PATH_B, bw, false);
+}
+
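+/*
+ * Write the S0 channel word and poll the synthesizer busy flag.
+ * Returns true if the 1 ms poll timed out.
+ */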
+static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val)
+{
+ u32 tmp;
+ int ret;
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK, val);
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000,
+ false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]LCK timeout\n");
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
+ return !!ret;
+}
+
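+/*
+ * Escalating recovery when the synthesizer fails to lock: reset the
+ * MMD first, then rewrite RF 0x18, and finally power-cycle the SYN
+ * block before re-triggering the lock.
+ */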
+static void _lck_check(struct rtw89_dev *rtwdev)
+{
+ u32 tmp;
+
+ if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN MMD reset\n");
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x0);
+ }
+
+ udelay(10);
+
+ if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]re-set RF 0x18\n");
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ _set_s0_arfc18(rtwdev, tmp);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
+ }
+
+ if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN off/on\n");
+
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK, tmp);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK, tmp);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x0);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ _set_s0_arfc18(rtwdev, tmp);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]0xb2=%x, 0xc5=%x\n",
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_VCO, RFREG_MASK),
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RFREG_MASK));
+ }
+}
+
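+/*
+ * Program the channel word with RR_LDO_SEL forced high; run the lock
+ * check only if the ARFC write completed within the poll window.
+ */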
+static void _set_ch(struct rtw89_dev *rtwdev, u32 val)
+{
+ bool timeout;
+ u32 bak;
+
+ bak = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RR_LDO_SEL, 0x1);
+ timeout = _set_s0_arfc18(rtwdev, val);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK, bak);
+ if (!timeout)
+ _lck_check(rtwdev);
+}
+
+static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ u8 central_ch, bool dav)
+{
+ u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
+ bool is_2g_ch = central_ch <= 14;
+ u32 rf_reg18;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);
+
+ rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
+ rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH |
+ RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH);
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);
+
+ if (!is_2g_ch)
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) |
+ FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);
+
+ rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
+ RR_CFGCH_BW2) & RFREG_MASK;
+ rf_reg18 |= RR_CFGCH_BW2;
+
+ if (path == RF_PATH_A && dav)
+ _set_ch(rtwdev, rf_reg18);
+ else
+ rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);
+
+ rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 0);
+ rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n",
+ central_ch, path, reg18_addr,
+ rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
+}
+
+static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch)
+{
+ _ch_setting(rtwdev, RF_PATH_A, central_ch, true);
+ _ch_setting(rtwdev, RF_PATH_B, central_ch, true);
+ _ch_setting(rtwdev, RF_PATH_A, central_ch, false);
+ _ch_setting(rtwdev, RF_PATH_B, central_ch, false);
+}
+
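+/*
+ * Select the RX baseband filter bandwidth through the RF LUT:
+ * 0x1b/0x13/0xb for 20/40/80 MHz, 0x3 otherwise.
+ */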
+static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
+ enum rtw89_rf_path path)
+{
+ rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0x12);
+
+ if (bw == RTW89_CHANNEL_WIDTH_20)
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x1b);
+ else if (bw == RTW89_CHANNEL_WIDTH_40)
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x13);
+ else if (bw == RTW89_CHANNEL_WIDTH_80)
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0xb);
+ else
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x3);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set S%d RXBB BW 0x3F = 0x%x\n",
+ path, rtw89_read_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB));
+
+ rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
+}
+
+static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_bandwidth bw)
+{
+ u8 kpath, path;
+
+ kpath = _kpath(rtwdev, phy);
+
+ for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ _set_rxbb_bw(rtwdev, bw, path);
+ }
+}
+
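+/* RF-side half of a channel switch: channel first, then bandwidth, then the RX filter. */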
+static void rtw8852bt_ctrl_bw_ch(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, u8 central_ch,
+ enum rtw89_band band, enum rtw89_bandwidth bw)
+{
+ _ctrl_ch(rtwdev, central_ch);
+ _ctrl_bw(rtwdev, phy, bw);
+ _rxbb_bw(rtwdev, phy, bw);
+}
+
+void rtw8852bt_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bt_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type,
+ chan->band_width);
}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
index 09918835c6e8..e34560b4905f 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
@@ -8,15 +8,24 @@
#include "core.h"
void rtw8852bt_rck(struct rtw89_dev *rtwdev);
-void rtw8852bt_dack(struct rtw89_dev *rtwdev);
-void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852bt_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev);
-void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en);
-void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx);
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852bt_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bte.c b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
new file mode 100644
index 000000000000..702948119646
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "reg.h"
+#include "rtw8852bt.h"
+
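+/*
+ * PCIe glue for the 8852BE-VT: the DMA and interrupt plumbing is the
+ * generic AX-gen PCI code; only the sizing, masks and addresses set
+ * here are chip-specific.
+ */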
+static const struct rtw89_pci_info rtw8852bt_pci_info = {
+ .gen_def = &rtw89_pci_gen_ax,
+ .txbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_mode = MAC_AX_RXBD_PKT,
+ .tag_mode = MAC_AX_TAG_MULTI,
+ .tx_burst = MAC_AX_TX_BURST_2048B,
+ .rx_burst = MAC_AX_RX_BURST_128B,
+ .wd_dma_idle_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .wd_dma_act_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .multi_tag_num = MAC_AX_TAG_NUM_8,
+ .lbc_en = MAC_AX_PCIE_ENABLE,
+ .lbc_tmr = MAC_AX_LBC_TMR_2MS,
+ .autok_en = MAC_AX_PCIE_DISABLE,
+ .io_rcy_en = MAC_AX_PCIE_DISABLE,
+ .io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
+ .rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
+
+ .init_cfg_reg = R_AX_PCIE_INIT_CFG1,
+ .txhci_en_bit = B_AX_TXHCI_EN,
+ .rxhci_en_bit = B_AX_RXHCI_EN,
+ .rxbd_mode_bit = B_AX_RXBD_MODE,
+ .exp_ctrl_reg = R_AX_PCIE_EXP_CTRL,
+ .max_tag_num_mask = B_AX_MAX_TAG_NUM,
+ .rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR,
+ .txbd_rwptr_clr2_reg = 0,
+ .dma_io_stop = {R_AX_PCIE_DMA_STOP1, B_AX_STOP_PCIEIO},
+ .dma_stop1 = {R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_MASK_V1},
+ .dma_stop2 = {0},
+ .dma_busy1 = {R_AX_PCIE_DMA_BUSY1, DMA_BUSY1_CHECK_V1},
+ .dma_busy2_reg = 0,
+ .dma_busy3_reg = R_AX_PCIE_DMA_BUSY1,
+
+ .rpwm_addr = R_AX_PCIE_HRPWM,
+ .cpwm_addr = R_AX_CPWM,
+ .mit_addr = R_AX_INT_MIT_RX,
+ .wp_sel_addr = 0,
+ .tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) |
+ BIT(RTW89_TXCH_ACH6) | BIT(RTW89_TXCH_ACH7) |
+ BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11),
+ .bd_idx_addr_low_power = NULL,
+ .dma_addr_set = &rtw89_pci_ch_dma_addr_set,
+ .bd_ram_table = &rtw89_bd_ram_table_single,
+
+ .ltr_set = rtw89_pci_ltr_set,
+ .fill_txaddr_info = rtw89_pci_fill_txaddr_info,
+ .config_intr_mask = rtw89_pci_config_intr_mask,
+ .enable_intr = rtw89_pci_enable_intr,
+ .disable_intr = rtw89_pci_disable_intr,
+ .recognize_intrs = rtw89_pci_recognize_intrs,
+};
+
+static const struct rtw89_driver_info rtw89_8852bte_info = {
+ .chip = &rtw8852bt_chip_info,
+ .quirks = NULL,
+ .bus = {
+ .pci = &rtw8852bt_pci_info,
+ },
+};
+
+static const struct pci_device_id rtw89_8852bte_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xb520),
+ .driver_data = (kernel_ulong_t)&rtw89_8852bte_info,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(pci, rtw89_8852bte_id_table);
+
+static struct pci_driver rtw89_8852bte_driver = {
+ .name = "rtw89_8852bte",
+ .id_table = rtw89_8852bte_id_table,
+ .probe = rtw89_pci_probe,
+ .remove = rtw89_pci_remove,
+ .driver.pm = &rtw89_pm_ops,
+};
+module_pci_driver(rtw89_8852bte_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852BE-VT driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 193168dc7b6c..1c6e89ab0f4b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -14,10 +14,10 @@
#include "rtw8852c_table.h"
#include "util.h"
-#define RTW8852C_FW_FORMAT_MAX 0
+#define RTW8852C_FW_FORMAT_MAX 1
#define RTW8852C_FW_BASENAME "rtw89/rtw8852c_fw"
#define RTW8852C_MODULE_FIRMWARE \
- RTW8852C_FW_BASENAME ".bin"
+ RTW8852C_FW_BASENAME "-" __stringify(RTW8852C_FW_FORMAT_MAX) ".bin"
static const struct rtw89_hfc_ch_cfg rtw8852c_hfc_chcfg_pcie[] = {
{13, 1614, grp_0}, /* ACH 0 */
@@ -147,6 +147,15 @@ static const struct rtw89_rrsr_cfgs rtw8852c_rrsr_cfgs = {
.rsc = {R_AX_PTCL_RRSR1, B_AX_RSC_MASK, 2},
};
+static const struct rtw89_rfkill_regs rtw8852c_rfkill_regs = {
+ .pinmux = {R_AX_GPIO8_15_FUNC_SEL,
+ B_AX_PINMUX_GPIO9_FUNC_SEL_MASK,
+ 0xf},
+ .mode = {R_AX_GPIO_EXT_CTRL + 2,
+ (B_AX_GPIO_MOD_9 | B_AX_GPIO_IO_SEL_9) >> 16,
+ 0x0},
+};
+
static const struct rtw89_dig_regs rtw8852c_dig_regs = {
.seg0_pd_reg = R_SEG0R_PD,
.pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
@@ -1808,7 +1817,7 @@ static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
RTW89_SCH_TX_SEL_ALL);
rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, false);
rtw8852c_dfs_en(rtwdev, false);
- rtw8852c_tssi_cont_en_phyidx(rtwdev, false, phy_idx);
+ rtw8852c_tssi_cont_en_phyidx(rtwdev, false, phy_idx, chan);
rtw8852c_adc_en(rtwdev, false);
fsleep(40);
rtw8852c_bb_reset_en(rtwdev, chan->band_type, phy_idx, false);
@@ -1816,7 +1825,7 @@ static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, true);
rtw8852c_adc_en(rtwdev, true);
rtw8852c_dfs_en(rtwdev, true);
- rtw8852c_tssi_cont_en_phyidx(rtwdev, true, phy_idx);
+ rtw8852c_tssi_cont_en_phyidx(rtwdev, true, phy_idx, chan);
rtw8852c_bb_reset_en(rtwdev, chan->band_type, phy_idx, true);
rtw89_chip_resume_sch_tx(rtwdev, mac_idx, p->tx_en);
}
@@ -1833,31 +1842,34 @@ static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev)
rtw8852c_dpk_init(rtwdev);
rtw8852c_rck(rtwdev);
- rtw8852c_dack(rtwdev);
+ rtw8852c_dack(rtwdev, RTW89_CHANCTX_0);
rtw8852c_rx_dck(rtwdev, RTW89_PHY_0, false);
}
-static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev)
+static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+ enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
rtw8852c_mcc_get_ch_info(rtwdev, phy_idx);
rtw8852c_rx_dck(rtwdev, phy_idx, false);
- rtw8852c_iqk(rtwdev, phy_idx);
- rtw8852c_tssi(rtwdev, phy_idx);
- rtw8852c_dpk(rtwdev, phy_idx);
+ rtw8852c_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8852c_tssi(rtwdev, phy_idx, chanctx_idx);
+ rtw8852c_dpk(rtwdev, phy_idx, chanctx_idx);
rtw89_fw_h2c_rf_ntfy_mcc(rtwdev);
}
static void rtw8852c_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8852c_tssi_scan(rtwdev, phy_idx);
+ rtw8852c_tssi_scan(rtwdev, phy_idx, chan);
}
-static void rtw8852c_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+static void rtw8852c_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool start)
{
- rtw8852c_wifi_scan_notify(rtwdev, start, RTW89_PHY_0);
+ rtw8852c_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx);
}
static void rtw8852c_rfk_track(struct rtw89_dev *rtwdev)
@@ -2108,7 +2120,7 @@ rtw8852c_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
static void rtw8852c_bb_cfg_rx_path(struct rtw89_dev *rtwdev, u8 rx_path)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 band = chan->band_type;
u32 rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI;
u32 rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI;
@@ -2840,10 +2852,12 @@ static const struct rtw89_chanctx_listener rtw8852c_chanctx_listener = {
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8852c = {
- .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_NET_DETECT,
.n_patterns = RTW89_MAX_PATTERN_NUM,
.pattern_max_len = RTW89_MAX_PATTERN_SIZE,
.pattern_min_len = 1,
+ .max_nd_match_sets = RTW89_SCANOFLD_MAX_SSID,
};
#endif
@@ -2876,9 +2890,11 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.get_thermal = rtw8852c_get_thermal,
.ctrl_btg_bt_rx = rtw8852c_ctrl_btg_bt_rx,
.query_ppdu = rtw8852c_query_ppdu,
+ .convert_rpl_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852c_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852c_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = rtw8852c_pwr_on_func,
.pwr_off_func = rtw8852c_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -2946,6 +2962,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.dig_regs = &rtw8852c_dig_regs,
.tssi_dbw_table = &rtw89_8852c_tssi_dbw_table,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 2,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
@@ -2959,6 +2976,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = true,
.hw_sec_hdr = true,
+ .hw_mgmt_tx_encrypt = true,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
@@ -3022,6 +3040,8 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.rrsr_cfgs = &rtw8852c_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_MAP, B_BSS_CLR_MAP_VLD0},
.bss_clr_map_reg = R_BSS_CLR_MAP,
+ .rfkill_init = &rtw8852c_rfkill_regs,
+ .rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9},
.dma_ch_mask = 0,
.edcca_regs = &rtw8852c_edcca_regs,
#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
index 743f7014bf3e..211c051c2967 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -5,6 +5,7 @@
#include "chan.h"
#include "coex.h"
#include "debug.h"
+#include "fw.h"
#include "phy.h"
#include "reg.h"
#include "rtw8852c.h"
@@ -584,11 +585,12 @@ static void _drck(struct rtw89_dev *rtwdev)
rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
}
-static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
+static void _dac_cal(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dack_info *dack = &rtwdev->dack;
u32 rf0_0, rf1_0;
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB, chanctx_idx);
dack->dack_done = false;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
@@ -1321,9 +1323,10 @@ static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u
}
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, u8 path)
+ enum rtw89_phy_idx phy, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
@@ -1516,12 +1519,13 @@ static void _iqk_init(struct rtw89_dev *rtwdev)
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
u32 backup_rf_val[RTW8852C_IQK_SS][BACKUP_RF_REGS_NR];
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
@@ -1531,7 +1535,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8852C_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, backup_bb_val);
_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
_iqk_macbb_setting(rtwdev, phy_idx, path);
@@ -1544,18 +1548,19 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
switch (_kpath(rtwdev, phy_idx)) {
case RF_A:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
break;
case RF_B:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
case RF_AB:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
default:
break;
@@ -1901,9 +1906,9 @@ static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _dpk_information(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 kidx = dpk->cur_idx[path];
@@ -2495,9 +2500,9 @@ static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
bool is_reload = false;
u8 idx, cur_band, cur_ch;
@@ -2689,7 +2694,8 @@ static void _dpk_drf_direct_cntrl(struct rtw89_dev *rtwdev, u8 path, bool is_byb
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0c4, 0xc0e8, 0xc0d4, 0xc0d8};
@@ -2705,7 +2711,8 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
if (!(kpath & BIT(path)))
continue;
- reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path,
+ chanctx_idx);
if (!reloaded[path] && dpk->bp[path][0].ch != 0)
dpk->cur_idx[path] = !dpk->cur_idx[path];
else
@@ -2722,7 +2729,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
path, dpk->cur_idx[path]);
_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
_dpk_init(rtwdev, path);
if (rtwdev->is_tssi_mode[path])
_dpk_tssi_pause(rtwdev, path, true);
@@ -2755,10 +2762,11 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
_dpk_kip_pwr_clk_onoff(rtwdev, false);
}
-static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_fem_info *fem = &rtwdev->fem;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 band = chan->band_type;
if (rtwdev->hal.cv == CHIP_CAV && band != RTW89_BAND_2G) {
@@ -2790,17 +2798,18 @@ static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
}
-static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
RTW8852C_DPK_VER, rtwdev->hal.cv,
RTW8852C_RF_REL_VERSION);
- if (_dpk_bypass_check(rtwdev, phy))
+ if (_dpk_bypass_check(rtwdev, phy, chanctx_idx))
_dpk_force_bypass(rtwdev, phy);
else
- _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
+ _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy), chanctx_idx);
if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_DCKC, RR_DCKC_CHK) == 0x1)
rtw8852c_rx_dck(rtwdev, phy, false);
@@ -2891,9 +2900,8 @@ static void _dpk_track(struct rtw89_dev *rtwdev)
}
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_bandwidth bw = chan->band_width;
enum rtw89_band band = chan->band_type;
u32 clk = 0x0;
@@ -2945,9 +2953,8 @@ static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
}
static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A) {
@@ -2972,7 +2979,7 @@ static void _tssi_set_bbgain_split(struct rtw89_dev *rtwdev, enum rtw89_phy_idx
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define RTW8852C_TSSI_GET_VAL(ptr, idx) \
({ \
@@ -2985,8 +2992,8 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
} \
__val; \
})
+ struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -3001,56 +3008,88 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
switch (subband) {
default:
case RTW89_CH_2G:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_p;
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_n;
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_p;
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_n;
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_2ga_p;
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_2ga_n;
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_2gb_p;
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_2gb_n;
break;
case RTW89_CH_5G_BAND_1:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0];
break;
case RTW89_CH_5G_BAND_3:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1];
break;
case RTW89_CH_5G_BAND_4:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2];
break;
case RTW89_CH_6G_BAND_IDX0:
case RTW89_CH_6G_BAND_IDX1:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0];
break;
case RTW89_CH_6G_BAND_IDX2:
case RTW89_CH_6G_BAND_IDX3:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1];
break;
case RTW89_CH_6G_BAND_IDX4:
case RTW89_CH_6G_BAND_IDX5:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2];
break;
case RTW89_CH_6G_BAND_IDX6:
case RTW89_CH_6G_BAND_IDX7:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3];
break;
}
@@ -3158,9 +3197,8 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
}
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A) {
@@ -3175,9 +3213,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_set_aligk_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
const struct rtw89_rfk_tbl *tbl;
@@ -3586,10 +3624,9 @@ static u32 _tssi_get_6g_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
@@ -3650,10 +3687,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
@@ -3715,10 +3751,9 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
}
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy)
+ enum rtw89_phy_idx phy, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
@@ -3741,7 +3776,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
for (i = path; i < path_max; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3757,8 +3792,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
_TSSI_DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3781,7 +3816,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
}
static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
static const u32 tssi_trk[2] = {0x5818, 0x7818};
static const u32 tssi_en[2] = {0x5820, 0x7820};
@@ -3790,25 +3825,26 @@ static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x0);
rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x0);
if (rtwdev->dbcc_en && path == RF_PATH_B)
- _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1);
+ _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1, chan);
else
- _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0);
+ _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0, chan);
} else {
rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x1);
rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x1);
}
}
-void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
+void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx,
+ const struct rtw89_chan *chan)
{
if (!rtwdev->dbcc_en) {
- rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
- rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A, chan);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B, chan);
} else {
if (phy_idx == RTW89_PHY_0)
- rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A, chan);
else
- rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B, chan);
}
}
@@ -4079,10 +4115,10 @@ void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_i
mode = rtw89_get_entity_mode(rtwdev);
switch (mode) {
case RTW89_ENTITY_MODE_MCC_PREPARE:
- chan_idx = RTW89_SUB_ENTITY_1;
+ chan_idx = RTW89_CHANCTX_1;
break;
default:
- chan_idx = RTW89_SUB_ENTITY_0;
+ chan_idx = RTW89_CHANCTX_0;
break;
}
@@ -4112,26 +4148,27 @@ void rtw8852c_rck(struct rtw89_dev *rtwdev)
_rck(rtwdev, path);
}
-void rtw8852c_dack(struct rtw89_dev *rtwdev)
+void rtw8852c_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
- _dac_cal(rtwdev, false);
+ _dac_cal(rtwdev, false, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
-void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
@@ -4202,10 +4239,11 @@ void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_a
void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_chanctx_idx chanctx_idx = RTW89_CHANCTX_0;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u8 dck_channel;
u8 cur_thermal;
u32 tx_en;
@@ -4259,16 +4297,17 @@ void rtw8852c_dpk_init(struct rtw89_dev *rtwdev)
dpk->is_dpk_reload_en = false;
}
-void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
- _dpk(rtwdev, phy_idx, false);
+ _dpk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
@@ -4279,8 +4318,10 @@ void rtw8852c_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
@@ -4298,23 +4339,24 @@ void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = path; i < path_max; i++) {
- _tssi_set_sys(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
- _tssi_set_dck(rtwdev, phy, i);
+ _tssi_set_dck(rtwdev, phy, i, chan);
_tssi_set_bbgain_split(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_set_aligk_default(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_set_aligk_default(rtwdev, phy, i, chan);
_tssi_set_slope(rtwdev, phy, i);
_tssi_run_slope(rtwdev, phy, i);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
-void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
@@ -4339,15 +4381,15 @@ void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = path; i < path_max; i++) {
- _tssi_set_sys(rtwdev, phy, i);
- _tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_set_aligk_default(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i, chan);
+ _tssi_set_dck(rtwdev, phy, i, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_set_aligk_default(rtwdev, phy, i, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
static void rtw8852c_tssi_default_txagc(struct rtw89_dev *rtwdev,
@@ -4422,7 +4464,7 @@ void rtw8852c_rfk_chanctx_cb(struct rtw89_dev *rtwdev,
dpk->is_dpk_enable = true;
for (path = 0; path < RTW8852C_DPK_RF_PATH; path++)
_dpk_onoff(rtwdev, path, false);
- rtw8852c_dpk(rtwdev, RTW89_PHY_0);
+ rtw8852c_dpk(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
break;
default:
break;
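The _tssi_set_tmeter_tbl() hunk above applies the same selection once per subband: prefer the thermal-tracking deltas carried in the loaded firmware element when present, and fall back to the static rtw89_8852c_trk_cfg tables otherwise. A condensed, hedged sketch of that pattern (one subband shown; the index names come straight from the hunk):

	struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;

	/* Firmware-provided deltas win when the element was loaded;
	 * otherwise use the built-in table for the same subband slot.
	 */
	thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1] :
			 rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1];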
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
index 6605137e61aa..306dd0a0be73 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
@@ -9,16 +9,21 @@
void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
void rtw8852c_rck(struct rtw89_dev *rtwdev);
-void rtw8852c_dack(struct rtw89_dev *rtwdev);
-void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852c_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool is_afe);
void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev);
void rtw8852c_dpk_init(struct rtw89_dev *rtwdev);
-void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852c_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
-void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
-void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx);
+void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
+void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx,
+ const struct rtw89_chan *chan);
void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
enum rtw89_phy_idx phy_idx);
void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
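Summarizing the prototype changes in this header: the RFK entry points no longer read RTW89_CHANCTX_0 implicitly; they take either a chanctx index (dack/iqk/dpk/tssi) or an already-resolved struct rtw89_chan (tssi_scan, tssi_cont_en_phyidx). A minimal caller sketch using only the prototypes above; the wrapper itself is illustrative, not part of the patch:

	static void example_rfk_full_cal(struct rtw89_dev *rtwdev,
					 enum rtw89_phy_idx phy_idx,
					 enum rtw89_chanctx_idx chanctx_idx)
	{
		/* Resolve the channel once for the chan-based variants. */
		const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);

		rtw8852c_dack(rtwdev, chanctx_idx);
		rtw8852c_iqk(rtwdev, phy_idx, chanctx_idx);
		rtw8852c_dpk(rtwdev, phy_idx, chanctx_idx);
		rtw8852c_tssi(rtwdev, phy_idx, chanctx_idx);
		rtw8852c_tssi_scan(rtwdev, phy_idx, chan);
	}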
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 2af568a3264d..63b1ff2f98ed 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -13,10 +13,10 @@
#include "rtw8922a_rfk.h"
#include "util.h"
-#define RTW8922A_FW_FORMAT_MAX 0
+#define RTW8922A_FW_FORMAT_MAX 1
#define RTW8922A_FW_BASENAME "rtw89/rtw8922a_fw"
#define RTW8922A_MODULE_FIRMWARE \
- RTW8922A_FW_BASENAME ".bin"
+ RTW8922A_FW_BASENAME "-" __stringify(RTW8922A_FW_FORMAT_MAX) ".bin"
#define HE_N_USER_MAX_8922A 4
@@ -165,6 +165,15 @@ static const struct rtw89_rrsr_cfgs rtw8922a_rrsr_cfgs = {
.rsc = {R_BE_PTCL_RRSR1, B_BE_RSC_MASK, 2},
};
+static const struct rtw89_rfkill_regs rtw8922a_rfkill_regs = {
+ .pinmux = {R_BE_GPIO8_15_FUNC_SEL,
+ B_BE_PINMUX_GPIO9_FUNC_SEL_MASK,
+ 0xf},
+ .mode = {R_BE_GPIO_EXT_CTRL + 2,
+ (B_BE_GPIO_MOD_9 | B_BE_GPIO_IO_SEL_9) >> 16,
+ 0x0},
+};
+
static const struct rtw89_dig_regs rtw8922a_dig_regs = {
.seg0_pd_reg = R_SEG0R_PD_V2,
.pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
@@ -1680,9 +1689,63 @@ static int rtw8922a_ctrl_rx_path_tmac(struct rtw89_dev *rtwdev,
return 0;
}
+#define DIGITAL_PWR_COMP_REG_NUM 22
+static const u32 rtw8922a_digital_pwr_comp_val[][DIGITAL_PWR_COMP_REG_NUM] = {
+ {0x012C0096, 0x044C02BC, 0x00322710, 0x015E0096, 0x03C8028A,
+ 0x0BB80708, 0x17701194, 0x02020100, 0x03030303, 0x01000303,
+ 0x05030302, 0x06060605, 0x06050300, 0x0A090807, 0x02000B0B,
+ 0x09080604, 0x0D0D0C0B, 0x08060400, 0x110F0C0B, 0x05001111,
+ 0x0D0C0907, 0x12121210},
+ {0x012C0096, 0x044C02BC, 0x00322710, 0x015E0096, 0x03C8028A,
+ 0x0BB80708, 0x17701194, 0x04030201, 0x05050505, 0x01000505,
+ 0x07060504, 0x09090908, 0x09070400, 0x0E0D0C0B, 0x03000E0E,
+ 0x0D0B0907, 0x1010100F, 0x0B080500, 0x1512100D, 0x05001515,
+ 0x100D0B08, 0x15151512},
+};
+
+static void rtw8922a_set_digital_pwr_comp(struct rtw89_dev *rtwdev,
+ bool enable, u8 nss,
+ enum rtw89_rf_path path)
+{
+ static const u32 ltpc_t0[2] = {R_BE_LTPC_T0_PATH0, R_BE_LTPC_T0_PATH1};
+ const u32 *digital_pwr_comp;
+ u32 addr, val;
+ u32 i;
+
+ if (nss == 1)
+ digital_pwr_comp = rtw8922a_digital_pwr_comp_val[0];
+ else
+ digital_pwr_comp = rtw8922a_digital_pwr_comp_val[1];
+
+ addr = ltpc_t0[path];
+ for (i = 0; i < DIGITAL_PWR_COMP_REG_NUM; i++, addr += 4) {
+ val = enable ? digital_pwr_comp[i] : 0;
+ rtw89_phy_write32(rtwdev, addr, val);
+ }
+}
+
+static void rtw8922a_digital_pwr_comp(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ bool enable = chan->band_type != RTW89_BAND_2G;
+ u8 path;
+
+ if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
+ if (phy_idx == RTW89_PHY_0)
+ path = RF_PATH_A;
+ else
+ path = RF_PATH_B;
+ rtw8922a_set_digital_pwr_comp(rtwdev, enable, 1, path);
+ } else {
+ rtw8922a_set_digital_pwr_comp(rtwdev, enable, 2, RF_PATH_A);
+ rtw8922a_set_digital_pwr_comp(rtwdev, enable, 2, RF_PATH_B);
+ }
+}
+
static int rtw8922a_ctrl_mlo(struct rtw89_dev *rtwdev, enum rtw89_mlo_dbcc_mode mode)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
if (mode == MLO_1_PLUS_1_1RF || mode == DBCC_LEGACY) {
rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x1);
@@ -1801,11 +1864,13 @@ static void rtw8922a_pre_set_channel_bb(struct rtw89_dev *rtwdev,
}
static void rtw8922a_post_set_channel_bb(struct rtw89_dev *rtwdev,
- enum rtw89_mlo_dbcc_mode mode)
+ enum rtw89_mlo_dbcc_mode mode,
+ enum rtw89_phy_idx phy_idx)
{
if (!rtwdev->dbcc_en)
return;
+ rtw8922a_digital_pwr_comp(rtwdev, phy_idx);
rtw8922a_ctrl_mlo(rtwdev, mode);
}
@@ -1912,7 +1977,7 @@ static void rtw8922a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
rtw8922a_hal_reset(rtwdev, phy_idx, mac_idx, chan->band_type, &p->tx_en, enter);
if (!enter) {
- rtw8922a_post_set_channel_bb(rtwdev, rtwdev->mlo_dbcc_mode);
+ rtw8922a_post_set_channel_bb(rtwdev, rtwdev->mlo_dbcc_mode, phy_idx);
rtw8922a_post_set_channel_rf(rtwdev, phy_idx);
}
}
@@ -1928,10 +1993,12 @@ static void rtw8922a_rfk_init(struct rtw89_dev *rtwdev)
static void rtw8922a_rfk_init_late(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+
rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, RTW89_PHY_0, 5);
- rtw89_phy_rfk_dack_and_wait(rtwdev, RTW89_PHY_0, 58);
- rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, 32);
+ rtw89_phy_rfk_dack_and_wait(rtwdev, RTW89_PHY_0, chan, 58);
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, chan, 32);
}
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
@@ -1953,10 +2020,12 @@ static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
}
}
-static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev)
+static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
+ enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_START);
@@ -1964,23 +2033,25 @@ static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev)
_wait_rx_mode(rtwdev, RF_AB);
rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, phy_idx, 5);
- rtw89_phy_rfk_txgapk_and_wait(rtwdev, phy_idx, 54);
- rtw89_phy_rfk_iqk_and_wait(rtwdev, phy_idx, 84);
- rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_NORMAL, 6);
- rtw89_phy_rfk_dpk_and_wait(rtwdev, phy_idx, 34);
- rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, 32);
+ rtw89_phy_rfk_txgapk_and_wait(rtwdev, phy_idx, chan, 54);
+ rtw89_phy_rfk_iqk_and_wait(rtwdev, phy_idx, chan, 84);
+ rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, chan, RTW89_TSSI_NORMAL, 6);
+ rtw89_phy_rfk_dpk_and_wait(rtwdev, phy_idx, chan, 34);
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, chan, 32);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_STOP);
}
static void rtw8922a_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_SCAN, 6);
+ rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, chan, RTW89_TSSI_SCAN, 6);
}
-static void rtw8922a_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+static void rtw8922a_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool start)
{
}
@@ -2109,7 +2180,7 @@ static void rtw8922a_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
static void rtw8922a_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
struct rtw89_hal *hal = &rtwdev->hal;
u8 ntx_path = RF_PATH_AB;
@@ -2426,6 +2497,38 @@ static void rtw8922a_query_ppdu(struct rtw89_dev *rtwdev,
rtw8922a_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
}
+static void rtw8922a_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ /* Mapping to BW: 5, 10, 20, 40, 80, 160, 80_80 */
+ static const u8 bw_compensate[] = {0, 0, 0, 6, 12, 18, 0};
+ u8 *rssi = phy_ppdu->rssi;
+ u8 compensate = 0;
+ u16 rpl_tmp;
+ u8 i;
+
+ if (phy_ppdu->bw_idx < ARRAY_SIZE(bw_compensate))
+ compensate = bw_compensate[phy_ppdu->bw_idx];
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ if (!(phy_ppdu->rx_path_en & BIT(i))) {
+ rssi[i] = 0;
+ phy_ppdu->rpl_path[i] = 0;
+ phy_ppdu->rpl_fd[i] = 0;
+ }
+ if (phy_ppdu->rate >= RTW89_HW_RATE_OFDM6) {
+ rpl_tmp = phy_ppdu->rpl_fd[i];
+ if (rpl_tmp)
+ rpl_tmp += compensate;
+
+ phy_ppdu->rpl_path[i] = rpl_tmp;
+ }
+ rssi[i] = phy_ppdu->rpl_path[i];
+ }
+
+ phy_ppdu->rssi_avg = phy_ppdu->rpl_avg;
+}
+
static int rtw8922a_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
rtw89_write8_set(rtwdev, R_BE_FEN_RST_ENABLE,
@@ -2445,10 +2548,12 @@ static int rtw8922a_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = {
- .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_NET_DETECT,
.n_patterns = RTW89_MAX_PATTERN_NUM,
.pattern_max_len = RTW89_MAX_PATTERN_SIZE,
.pattern_min_len = 1,
+ .max_nd_match_sets = RTW89_SCANOFLD_MAX_SSID,
};
#endif
@@ -2481,9 +2586,11 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.get_thermal = rtw8922a_get_thermal,
.ctrl_btg_bt_rx = rtw8922a_ctrl_btg_bt_rx,
.query_ppdu = rtw8922a_query_ppdu,
+ .convert_rpl_to_rssi = rtw8922a_convert_rpl_to_rssi,
.ctrl_nbtg_bt_tx = rtw8922a_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8922a_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = NULL,
+ .digital_pwr_comp = rtw8922a_digital_pwr_comp,
.pwr_on_func = rtw8922a_pwr_on_func,
.pwr_off_func = rtw8922a_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc_v2,
@@ -2549,6 +2656,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.dig_regs = &rtw8922a_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = 32,
+ .support_link_num = 2,
.support_chanctx_num = 2,
.support_rnr = true,
.support_bands = BIT(NL80211_BAND_2GHZ) |
@@ -2562,6 +2670,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = true,
+ .hw_mgmt_tx_encrypt = true,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
@@ -2624,6 +2733,8 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.rrsr_cfgs = &rtw8922a_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_VLD_V2, B_BSS_CLR_VLD0_V2},
.bss_clr_map_reg = R_BSS_CLR_MAP_V2,
+ .rfkill_init = &rtw8922a_rfkill_regs,
+ .rfkill_get = {R_BE_GPIO_EXT_CTRL, B_BE_GPIO_IN_9},
.dma_ch_mask = 0,
.edcca_regs = &rtw8922a_edcca_regs,
#ifdef CONFIG_PM
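Among the rtw8922a additions, rtw8922a_convert_rpl_to_rssi() replaces the per-path RSSI with the received-power level (RPL), adding a bandwidth-dependent compensation to the frequency-domain RPL for OFDM-and-above rates. The core arithmetic, as a standalone hedged sketch (compensation values and the zero-means-no-measurement convention are taken on the hunk's own terms):

	/* bw_idx maps to: 5, 10, 20, 40, 80, 160, 80+80 MHz */
	static const u8 bw_compensate[] = {0, 0, 0, 6, 12, 18, 0};

	static u16 example_compensated_rpl(u16 rpl_fd, u8 bw_idx)
	{
		u8 comp = bw_idx < ARRAY_SIZE(bw_compensate) ?
			  bw_compensate[bw_idx] : 0;

		/* A zero RPL means "no measurement"; leave it untouched. */
		return rpl_fd ? rpl_fd + comp : 0;
	}

So a 40 MHz OFDM PPDU gets +6 on its frequency-domain RPL, 80 MHz gets +12, and 160 MHz gets +18 before the value is copied into rssi[].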
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
index 0ebcb06ae848..28907df7407d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
@@ -256,7 +256,7 @@ static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev)
{
struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V1] = {};
- enum rtw89_sub_entity_idx sub_entity_idx;
+ enum rtw89_chanctx_idx chanctx_idx;
const struct rtw89_chan *chan;
enum rtw89_entity_mode mode;
u8 s0_tbl, s1_tbl;
@@ -265,14 +265,14 @@ static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev)
mode = rtw89_get_entity_mode(rtwdev);
switch (mode) {
case RTW89_ENTITY_MODE_MCC_PREPARE:
- sub_entity_idx = RTW89_SUB_ENTITY_1;
+ chanctx_idx = RTW89_CHANCTX_1;
break;
default:
- sub_entity_idx = RTW89_SUB_ENTITY_0;
+ chanctx_idx = RTW89_CHANCTX_0;
break;
}
- chan = rtw89_chan_get(rtwdev, sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, chanctx_idx);
for (tbl_sel = 0; tbl_sel < ARRAY_SIZE(desc); tbl_sel++) {
struct rtw89_rfk_chan_desc *p = &desc[tbl_sel];
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
index 1b2a400406ae..27826d909785 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.c
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -366,7 +366,7 @@ static void rtw89_tas_state_update(struct rtw89_dev *rtwdev)
if (src == RTW89_SAR_SOURCE_NONE)
return;
- chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
ret = sar_hdl->query_sar_config(rtwdev, chan->freq, &cfg);
if (ret)
return;
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index 3882938c0893..b2e47829983f 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -14,6 +14,7 @@
#define DATA_RATE_HT_IDX_MASK GENMASK(4, 0)
#define DATA_RATE_HT_IDX_MASK_V1 GENMASK(4, 0)
#define DATA_RATE_MODE_HT 0x1
+#define DATA_RATE_HT_NSS_MASK GENMASK(4, 3)
#define DATA_RATE_VHT_HE_NSS_MASK GENMASK(6, 4)
#define DATA_RATE_VHT_HE_IDX_MASK GENMASK(3, 0)
#define DATA_RATE_NSS_MASK_V1 GENMASK(7, 5)
@@ -51,6 +52,11 @@ static inline u8 rtw89_get_data_mcs(struct rtw89_dev *rtwdev, u16 hw_rate)
return u16_get_bits(hw_rate, DATA_RATE_VHT_HE_IDX_MASK);
}
+static inline u8 rtw89_get_data_ht_nss(struct rtw89_dev *rtwdev, u16 hw_rate)
+{
+ return u16_get_bits(hw_rate, DATA_RATE_HT_NSS_MASK);
+}
+
static inline u8 rtw89_get_data_nss(struct rtw89_dev *rtwdev, u16 hw_rate)
{
if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
@@ -408,7 +414,7 @@ struct rtw89_rxinfo_user {
#define RTW89_RXINFO_USER_DATA BIT(1)
#define RTW89_RXINFO_USER_CTRL BIT(2)
#define RTW89_RXINFO_USER_MGMT BIT(3)
-#define RTW89_RXINFO_USER_BCM BIT(4)
+#define RTW89_RXINFO_USER_BCN BIT(4)
#define RTW89_RXINFO_USER_MACID GENMASK(15, 8)
struct rtw89_rxinfo {
@@ -435,6 +441,7 @@ struct rtw89_phy_sts_hdr {
} __packed;
#define RTW89_PHY_STS_HDR_W0_IE_MAP GENMASK(4, 0)
+#define RTW89_PHY_STS_HDR_W0_HDR_2_EN BIT(5)
#define RTW89_PHY_STS_HDR_W0_VALID BIT(7)
#define RTW89_PHY_STS_HDR_W0_LEN GENMASK(15, 8)
#define RTW89_PHY_STS_HDR_W0_RSSI_AVG GENMASK(31, 24)
@@ -443,6 +450,13 @@ struct rtw89_phy_sts_hdr {
#define RTW89_PHY_STS_HDR_W1_RSSI_C GENMASK(23, 16)
#define RTW89_PHY_STS_HDR_W1_RSSI_D GENMASK(31, 24)
+struct rtw89_phy_sts_hdr_v2 {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_PHY_STS_HDR_V2_W0_PATH_EN GENMASK(20, 16)
+
struct rtw89_phy_sts_iehdr {
__le32 w0;
};
@@ -546,13 +560,43 @@ struct rtw89_phy_sts_iehdr {
#define BE_RXD_HDR_OFFSET_MASK GENMASK(20, 16)
#define BE_RXD_WL_HD_IV_LEN_MASK GENMASK(26, 21)
-struct rtw89_phy_sts_ie0 {
+struct rtw89_phy_sts_ie00 {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+} __packed;
+
+#define RTW89_PHY_STS_IE00_W0_RPL GENMASK(15, 7)
+
+struct rtw89_phy_sts_ie00_v2 {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+} __packed;
+
+#define RTW89_PHY_STS_IE00_V2_W4_RPL_TD_A GENMASK(8, 0)
+#define RTW89_PHY_STS_IE00_V2_W4_RPL_TD_B GENMASK(17, 9)
+#define RTW89_PHY_STS_IE00_V2_W4_RPL_TD_C GENMASK(26, 18)
+#define RTW89_PHY_STS_IE00_V2_W5_RPL_TD_D GENMASK(8, 0)
+
+struct rtw89_phy_sts_ie01 {
__le32 w0;
__le32 w1;
__le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
} __packed;
#define RTW89_PHY_STS_IE01_W0_CH_IDX GENMASK(23, 16)
+#define RTW89_PHY_STS_IE01_W0_RSSI_AVG_FD GENMASK(15, 8)
+#define RTW89_PHY_STS_IE01_W0_RX_PATH_EN GENMASK(31, 28)
#define RTW89_PHY_STS_IE01_W1_FD_CFO GENMASK(19, 8)
#define RTW89_PHY_STS_IE01_W1_PREMB_CFO GENMASK(31, 20)
#define RTW89_PHY_STS_IE01_W2_AVG_SNR GENMASK(5, 0)
@@ -561,6 +605,25 @@ struct rtw89_phy_sts_ie0 {
#define RTW89_PHY_STS_IE01_W2_LDPC BIT(28)
#define RTW89_PHY_STS_IE01_W2_STBC BIT(30)
+struct rtw89_phy_sts_ie01_v2 {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+} __packed;
+
+#define RTW89_PHY_STS_IE01_V2_W5_BW_IDX GENMASK(31, 29)
+#define RTW89_PHY_STS_IE01_V2_W8_RPL_FD_A GENMASK(11, 4)
+#define RTW89_PHY_STS_IE01_V2_W8_RPL_FD_B GENMASK(23, 16)
+#define RTW89_PHY_STS_IE01_V2_W9_RPL_FD_C GENMASK(11, 4)
+#define RTW89_PHY_STS_IE01_V2_W9_RPL_FD_D GENMASK(23, 16)
+
enum rtw89_tx_channel {
RTW89_TXCH_ACH0 = 0,
RTW89_TXCH_ACH1 = 1,
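The new DATA_RATE_HT_NSS_MASK covers bits [4:3] of the HT rate index and backs rtw89_get_data_ht_nss() above. Since HT MCS 0-31 advance in blocks of eight per spatial stream, that field works out to (nss - 1) — an inference from the mask, not something the patch states. Worked example, assuming the low five bits of hw_rate carry the HT MCS index (per DATA_RATE_HT_IDX_MASK):

	u16 hw_rate = 12;	/* HT MCS 12, i.e. two spatial streams */
	u8 nss_m1 = u16_get_bits(hw_rate, DATA_RATE_HT_NSS_MASK);
	/* 12 = 0b01100 -> bits [4:3] = 0b01 -> nss_m1 == 1 (2SS) */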
diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h
index e82e7df052d8..e669544cafd3 100644
--- a/drivers/net/wireless/realtek/rtw89/util.h
+++ b/drivers/net/wireless/realtek/rtw89/util.h
@@ -16,6 +16,24 @@
#define rtw89_for_each_rtwvif(rtwdev, rtwvif) \
list_for_each_entry(rtwvif, &(rtwdev)->rtwvifs_list, list)
+/* Before adding rtwvif to the list, we need to check whether it already
+ * exists, because in some cases, such as SER L2 happening during the
+ * WoWLAN flow, calling reconfig twice causes the entry to be added twice.
+ */
+static inline bool rtw89_rtwvif_in_list(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *new)
+{
+ struct rtw89_vif *rtwvif;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ if (rtwvif == new)
+ return true;
+
+ return false;
+}
+
/* The result of negative dividend and positive divisor is undefined, but it
* should be one case of round-down or round-up. So, make it round-down if the
* result is round-up.
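rtw89_rtwvif_in_list() exists to make the vif-list insertion idempotent. A hedged sketch of the intended call pattern at the add site (the add site itself is illustrative; only the helper, list head, and list member come from this file):

	lockdep_assert_held(&rtwdev->mutex);

	/* SER L2 during the WoWLAN flow can run reconfig twice;
	 * link the vif only if it is not already on the list.
	 */
	if (!rtw89_rtwvif_in_list(rtwdev, rtwvif))
		list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);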
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 9882064ef68d..86e24e07780d 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -687,17 +687,30 @@ static void rtw89_wow_enter_deep_ps(struct rtw89_dev *rtwdev)
__rtw89_enter_ps_mode(rtwdev, rtwvif);
}
-static void rtw89_wow_enter_lps(struct rtw89_dev *rtwdev)
+static void rtw89_wow_enter_ps(struct rtw89_dev *rtwdev)
{
struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
- rtw89_enter_lps(rtwdev, rtwvif, false);
+ if (rtw89_wow_mgd_linked(rtwdev))
+ rtw89_enter_lps(rtwdev, rtwvif, false);
+ else if (rtw89_wow_no_link(rtwdev))
+ rtw89_fw_h2c_fwips(rtwdev, rtwvif, true);
}
-static void rtw89_wow_leave_lps(struct rtw89_dev *rtwdev)
+static void rtw89_wow_leave_ps(struct rtw89_dev *rtwdev, bool enable_wow)
{
- rtw89_leave_lps(rtwdev);
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+
+ if (rtw89_wow_mgd_linked(rtwdev)) {
+ rtw89_leave_lps(rtwdev);
+ } else if (rtw89_wow_no_link(rtwdev)) {
+ if (enable_wow)
+ rtw89_leave_ips(rtwdev);
+ else
+ rtw89_fw_h2c_fwips(rtwdev, rtwvif, false);
+ }
}
static int rtw89_wow_config_mac(struct rtw89_dev *rtwdev, bool enable_wow)
@@ -781,17 +794,22 @@ static void rtw89_wow_vif_iter(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvi
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
- /* Current wowlan function support setting of only one STATION vif.
- * So when one suitable vif is found, stop the iteration.
+ /* The current WoWLAN function supports setting only one vif, in
+ * infra mode or in no-link mode. When one suitable vif is found,
+ * stop the iteration.
*/
if (rtw_wow->wow_vif || vif->type != NL80211_IFTYPE_STATION)
return;
switch (rtwvif->net_type) {
case RTW89_NET_TYPE_INFRA:
- rtw_wow->wow_vif = vif;
+ if (rtw_wow_has_mgd_features(rtwdev))
+ rtw_wow->wow_vif = vif;
break;
case RTW89_NET_TYPE_NO_LINK:
+ if (rtw_wow->pno_inited)
+ rtw_wow->wow_vif = vif;
+ break;
default:
break;
}
@@ -1025,6 +1043,23 @@ static void rtw89_wow_clear_wakeups(struct rtw89_dev *rtwdev)
rtw_wow->wow_vif = NULL;
rtw89_core_release_all_bits_map(rtw_wow->flags, RTW89_WOW_FLAG_NUM);
rtw_wow->pattern_cnt = 0;
+ rtw_wow->pno_inited = false;
+}
+
+static void rtw89_wow_init_pno(struct rtw89_dev *rtwdev,
+ struct cfg80211_sched_scan_request *nd_config)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+
+ if (!nd_config->n_match_sets || !nd_config->n_channels)
+ return;
+
+ rtw_wow->nd_config = nd_config;
+ rtw_wow->pno_inited = true;
+
+ INIT_LIST_HEAD(&rtw_wow->pno_pkt_list);
+
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "WOW: net-detect is enabled\n");
}
static int rtw89_wow_set_wakeups(struct rtw89_dev *rtwdev,
@@ -1037,6 +1072,11 @@ static int rtw89_wow_set_wakeups(struct rtw89_dev *rtwdev,
set_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags);
if (wowlan->magic_pkt)
set_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags);
+ if (wowlan->n_patterns && wowlan->patterns)
+ set_bit(RTW89_WOW_FLAG_EN_PATTERN, rtw_wow->flags);
+
+ if (wowlan->nd_config)
+ rtw89_wow_init_pno(rtwdev, wowlan->nd_config);
rtw89_for_each_rtwvif(rtwdev, rtwvif)
rtw89_wow_vif_iter(rtwdev, rtwvif);
@@ -1048,6 +1088,34 @@ static int rtw89_wow_set_wakeups(struct rtw89_dev *rtwdev,
return rtw89_wow_parse_patterns(rtwdev, rtwvif, wowlan);
}
+static int rtw89_wow_cfg_wake_pno(struct rtw89_dev *rtwdev, bool wow)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+ int ret;
+
+ ret = rtw89_fw_h2c_cfg_pno(rtwdev, rtwvif, true);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to config pno\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_wow_wakeup_ctrl(rtwdev, rtwvif, wow);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to fw wow wakeup ctrl\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_wow_global(rtwdev, rtwvif, wow);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to fw wow global\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int rtw89_wow_cfg_wake(struct rtw89_dev *rtwdev, bool wow)
{
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
@@ -1308,100 +1376,239 @@ static int rtw89_wow_disable_trx_post(struct rtw89_dev *rtwdev)
return ret;
}
-static int rtw89_wow_fw_start(struct rtw89_dev *rtwdev)
+static void rtw89_fw_release_pno_pkt_list(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
{
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
- struct rtw89_vif *rtwvif = (struct rtw89_vif *)rtw_wow->wow_vif->drv_priv;
+ struct list_head *pkt_list = &rtw_wow->pno_pkt_list;
+ struct rtw89_pktofld_info *info, *tmp;
+
+ list_for_each_entry_safe(info, tmp, pkt_list, list) {
+ rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
+ list_del(&info->list);
+ kfree(info);
+ }
+}
+
+static int rtw89_pno_scan_update_probe_req(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
+ u8 num = nd_config->n_match_sets, i;
+ struct rtw89_pktofld_info *info;
+ struct sk_buff *skb;
int ret;
- rtw89_wow_pattern_write(rtwdev);
- rtw89_wow_construct_key_info(rtwdev);
+ for (i = 0; i < num; i++) {
+ skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
+ nd_config->match_sets[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid_len,
+ nd_config->ie_len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, nd_config->ie, nd_config->ie_len);
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ kfree_skb(skb);
+ rtw89_fw_release_pno_pkt_list(rtwdev, rtwvif);
+ return -ENOMEM;
+ }
- ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, true);
- if (ret) {
- rtw89_err(rtwdev, "wow: failed to enable keep alive\n");
- return ret;
+ ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
+ if (ret) {
+ kfree_skb(skb);
+ kfree(info);
+ rtw89_fw_release_pno_pkt_list(rtwdev, rtwvif);
+ return ret;
+ }
+
+ list_add_tail(&info->list, &rtw_wow->pno_pkt_list);
+ kfree_skb(skb);
}
- ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, true);
- if (ret) {
- rtw89_err(rtwdev, "wow: failed to enable disconnect detect\n");
- goto out;
+ return 0;
+}
+
+static int rtw89_pno_scan_offload(struct rtw89_dev *rtwdev, bool enable)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+ int interval = rtw_wow->nd_config->scan_plans[0].interval;
+ struct rtw89_scan_option opt = {};
+ int ret;
+
+ if (enable) {
+ ret = rtw89_pno_scan_update_probe_req(rtwdev, rtwvif);
+ if (ret) {
+ rtw89_err(rtwdev, "Update probe request failed\n");
+ return ret;
+ }
+
+ ret = mac->add_chan_list_pno(rtwdev, rtwvif);
+ if (ret) {
+ rtw89_err(rtwdev, "Update channel list failed\n");
+ return ret;
+ }
}
- ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, true);
- if (ret) {
- rtw89_err(rtwdev, "wow: failed to enable GTK offload\n");
- goto out;
+ opt.enable = enable;
+ opt.repeat = RTW89_SCAN_NORMAL;
+ opt.norm_pd = max(interval, 1) * 10; /* in units of 100 ms */
+ opt.delay = max(rtw_wow->nd_config->delay, 1);
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP;
+ opt.scan_mode = RTW89_SCAN_MODE_SA;
+ opt.band = RTW89_PHY_0;
+ opt.num_macc_role = 0;
+ opt.mlo_mode = rtwdev->mlo_dbcc_mode;
+ opt.num_opch = 0;
+ opt.opch_end = RTW89_CHAN_INVALID;
}
- ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, true);
- if (ret)
- rtw89_warn(rtwdev, "wow: failed to enable arp offload\n");
+ mac->scan_offload(rtwdev, &opt, rtwvif, true);
- ret = rtw89_wow_cfg_wake(rtwdev, true);
- if (ret) {
- rtw89_err(rtwdev, "wow: failed to config wake\n");
- goto out;
+ return 0;
+}
+
+static int rtw89_wow_fw_start(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+ int ret;
+
+ if (rtw89_wow_no_link(rtwdev)) {
+ ret = rtw89_pno_scan_offload(rtwdev, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable pno scan offload\n");
+ return ret;
+ }
+
+ ret = rtw89_pno_scan_offload(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to enable pno scan offload\n");
+ return ret;
+ }
+ } else {
+ rtw89_wow_pattern_write(rtwdev);
+ rtw89_wow_construct_key_info(rtwdev);
+
+ ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to enable keep alive\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to enable disconnect detect\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to enable GTK offload\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, true);
+ if (ret)
+ rtw89_warn(rtwdev, "wow: failed to enable arp offload\n");
+ }
+
+ if (rtw89_wow_no_link(rtwdev)) {
+ ret = rtw89_wow_cfg_wake_pno(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to config wake PNO\n");
+ return ret;
+ }
+ } else {
+ ret = rtw89_wow_cfg_wake(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to config wake\n");
+ return ret;
+ }
}
ret = rtw89_wow_check_fw_status(rtwdev, true);
if (ret) {
rtw89_err(rtwdev, "wow: failed to check enable fw ready\n");
- goto out;
+ return ret;
}
-out:
- return ret;
+ return 0;
}
static int rtw89_wow_fw_stop(struct rtw89_dev *rtwdev)
{
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
- struct rtw89_vif *rtwvif = (struct rtw89_vif *)rtw_wow->wow_vif->drv_priv;
+ struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
int ret;
- rtw89_wow_pattern_clear(rtwdev);
+ if (rtw89_wow_no_link(rtwdev)) {
+ ret = rtw89_pno_scan_offload(rtwdev, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable pno scan offload\n");
+ return ret;
+ }
- ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, false);
- if (ret) {
- rtw89_err(rtwdev, "wow: failed to disable keep alive\n");
- goto out;
- }
+ ret = rtw89_fw_h2c_cfg_pno(rtwdev, rtwvif, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable pno\n");
+ return ret;
+ }
- ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, false);
- if (ret) {
- rtw89_err(rtwdev, "wow: failed to disable disconnect detect\n");
- goto out;
- }
+ rtw89_fw_release_pno_pkt_list(rtwdev, rtwvif);
+ } else {
+ rtw89_wow_pattern_clear(rtwdev);
- ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, false);
- if (ret) {
- rtw89_err(rtwdev, "wow: failed to disable GTK offload\n");
- goto out;
- }
+ ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable keep alive\n");
+ return ret;
+ }
- ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, false);
- if (ret)
- rtw89_warn(rtwdev, "wow: failed to disable arp offload\n");
+ ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable disconnect detect\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable GTK offload\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, false);
+ if (ret)
+ rtw89_warn(rtwdev, "wow: failed to disable arp offload\n");
+
+ rtw89_wow_key_clear(rtwdev);
+ rtw89_fw_release_general_pkt_list(rtwdev, true);
+ }
- rtw89_wow_key_clear(rtwdev);
- rtw89_fw_release_general_pkt_list(rtwdev, true);
ret = rtw89_wow_cfg_wake(rtwdev, false);
if (ret) {
rtw89_err(rtwdev, "wow: failed to disable config wake\n");
- goto out;
+ return ret;
}
ret = rtw89_wow_check_fw_status(rtwdev, false);
if (ret) {
rtw89_err(rtwdev, "wow: failed to check disable fw ready\n");
- goto out;
+ return ret;
}
-out:
- return ret;
+ return 0;
}
static int rtw89_wow_enable(struct rtw89_dev *rtwdev)
@@ -1430,7 +1637,7 @@ static int rtw89_wow_enable(struct rtw89_dev *rtwdev)
goto out;
}
- rtw89_wow_enter_lps(rtwdev);
+ rtw89_wow_enter_ps(rtwdev);
ret = rtw89_wow_enable_trx_post(rtwdev);
if (ret) {
@@ -1455,7 +1662,7 @@ static int rtw89_wow_disable(struct rtw89_dev *rtwdev)
goto out;
}
- rtw89_wow_leave_lps(rtwdev);
+ rtw89_wow_leave_ps(rtwdev, false);
ret = rtw89_wow_fw_stop(rtwdev);
if (ret) {
@@ -1480,6 +1687,12 @@ out:
return ret;
}
+static void rtw89_wow_restore_ps(struct rtw89_dev *rtwdev)
+{
+ if (rtw89_wow_no_link(rtwdev))
+ rtw89_enter_ips(rtwdev);
+}
+
int rtw89_wow_resume(struct rtw89_dev *rtwdev)
{
int ret;
@@ -1504,6 +1717,7 @@ int rtw89_wow_resume(struct rtw89_dev *rtwdev)
if (ret)
rtw89_err(rtwdev, "failed to disable wow\n");
+ rtw89_wow_restore_ps(rtwdev);
out:
rtw89_wow_clear_wakeups(rtwdev);
return ret;
@@ -1519,7 +1733,7 @@ int rtw89_wow_suspend(struct rtw89_dev *rtwdev, struct cfg80211_wowlan *wowlan)
return ret;
}
- rtw89_wow_leave_lps(rtwdev);
+ rtw89_wow_leave_ps(rtwdev, true);
ret = rtw89_wow_enable(rtwdev);
if (ret) {
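One detail of rtw89_pno_scan_offload() worth spelling out is the period conversion: cfg80211 sched-scan plan intervals are given in seconds, while the firmware's norm_pd field counts units of 100 ms per the hunk's own comment, hence the factor of 10. Worked example:

	int interval = 30;			/* seconds, from scan_plans[0] */
	u32 norm_pd = max(interval, 1) * 10;	/* 300 -> 30.0 s in 100 ms units */

Clamping with max(interval, 1) keeps a zero or missing plan interval from programming a zero scan period.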
diff --git a/drivers/net/wireless/realtek/rtw89/wow.h b/drivers/net/wireless/realtek/rtw89/wow.h
index 0d90add0e88d..3fbc2b87c058 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.h
+++ b/drivers/net/wireless/realtek/rtw89/wow.h
@@ -95,6 +95,29 @@ static inline int rtw89_wow_get_sec_hdr_len(struct rtw89_dev *rtwdev)
}
#ifdef CONFIG_PM
+static inline bool rtw89_wow_mgd_linked(struct rtw89_dev *rtwdev)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+
+ return rtwvif->net_type == RTW89_NET_TYPE_INFRA;
+}
+
+static inline bool rtw89_wow_no_link(struct rtw89_dev *rtwdev)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+
+ return rtwvif->net_type == RTW89_NET_TYPE_NO_LINK;
+}
+
+static inline bool rtw_wow_has_mgd_features(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+
+ return !bitmap_empty(rtw_wow->flags, RTW89_WOW_FLAG_NUM);
+}
+
int rtw89_wow_suspend(struct rtw89_dev *rtwdev, struct cfg80211_wowlan *wowlan);
int rtw89_wow_resume(struct rtw89_dev *rtwdev);
void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb);
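These helpers are what let the suspend/resume paths fork between the linked-STA flow and the new net-detect flow. Minimal dispatch sketch, mirroring the rtw89_wow_enter_ps() hunk earlier in this diff:

	if (rtw89_wow_mgd_linked(rtwdev))
		rtw89_enter_lps(rtwdev, rtwvif, false);		/* associated: LPS */
	else if (rtw89_wow_no_link(rtwdev))
		rtw89_fw_h2c_fwips(rtwdev, rtwvif, true);	/* PNO: fw IPS */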
diff --git a/drivers/net/wireless/rsi/rsi_debugfs.h b/drivers/net/wireless/rsi/rsi_debugfs.h
index a6a28640ad40..bbc1200dbb62 100644
--- a/drivers/net/wireless/rsi/rsi_debugfs.h
+++ b/drivers/net/wireless/rsi/rsi_debugfs.h
@@ -39,7 +39,6 @@ struct rsi_dbg_files {
struct rsi_debugfs {
struct dentry *subdir;
- struct rsi_dbg_ops *dfs_get_ops;
struct dentry *rsi_files[MAX_DEBUGFS_ENTRIES];
};
int rsi_init_dbgfs(struct rsi_hw *adapter);
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
index 216d43c8bd6e..7c04810dbf3d 100644
--- a/drivers/net/wireless/silabs/wfx/sta.c
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -352,8 +352,11 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif)
ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
skb->len - ieoffset);
- if (unlikely(!ptr))
+ if (!ptr) {
+ /* No RSN IE is fine in open networks */
+ ret = 0;
goto free_skb;
+ }
ptr += pairwise_cipher_suite_count_offset;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
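The wfx change makes a missing RSN IE a non-error: in an open network the template simply carries no RSN element, so cfg80211_find_ie() returning NULL must map to success instead of whatever error value was left in ret. Sketch of the corrected shape:

	ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
				      skb->len - ieoffset);
	if (!ptr) {
		ret = 0;	/* open network: no RSN IE, nothing to patch */
		goto free_skb;
	}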
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index 34d95f458e1a..a9f090e15cbb 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -142,7 +142,7 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl)
wl18xx_radar_type_decode(mbox->radar_type));
if (!wl->radar_debug_mode)
- ieee80211_radar_detected(wl->hw);
+ ieee80211_radar_detected(wl->hw, NULL);
}
if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index d86e6ff4523d..f0e528abb1b4 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(mlo, "Support MLO");
static bool multi_radio;
module_param(multi_radio, bool, 0444);
-MODULE_PARM_DESC(mlo, "Support Multiple Radios per wiphy");
+MODULE_PARM_DESC(multi_radio, "Support Multiple Radios per wiphy");
/**
* enum hwsim_regtest - the type of regulatory tests we offer
@@ -1146,7 +1146,7 @@ static int hwsim_write_simulate_radar(void *dat, u64 val)
{
struct mac80211_hwsim_data *data = dat;
- ieee80211_radar_detected(data->hw);
+ ieee80211_radar_detected(data->hw, NULL);
return 0;
}
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index a8a94edf2a70..9ae10f65f2af 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -17,7 +17,7 @@
#include <linux/workqueue.h>
#include <linux/module.h>
#include <net/mac80211.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "zd_def.h"
#include "zd_mac.h"
diff --git a/drivers/net/wwan/qcom_bam_dmux.c b/drivers/net/wwan/qcom_bam_dmux.c
index 26ca719fa0de..5dcb9a84a12e 100644
--- a/drivers/net/wwan/qcom_bam_dmux.c
+++ b/drivers/net/wwan/qcom_bam_dmux.c
@@ -823,17 +823,17 @@ static int bam_dmux_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(dev, pc_ack_irq, NULL, bam_dmux_pc_ack_irq,
IRQF_ONESHOT, NULL, dmux);
if (ret)
- return ret;
+ goto err_disable_pm;
ret = devm_request_threaded_irq(dev, dmux->pc_irq, NULL, bam_dmux_pc_irq,
IRQF_ONESHOT, NULL, dmux);
if (ret)
- return ret;
+ goto err_disable_pm;
ret = irq_get_irqchip_state(dmux->pc_irq, IRQCHIP_STATE_LINE_LEVEL,
&dmux->pc_state);
if (ret)
- return ret;
+ goto err_disable_pm;
/* Check if remote finished initialization before us */
if (dmux->pc_state) {
@@ -844,6 +844,11 @@ static int bam_dmux_probe(struct platform_device *pdev)
}
return 0;
+
+err_disable_pm:
+ pm_runtime_disable(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ return ret;
}
static void bam_dmux_remove(struct platform_device *pdev)
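The bam_dmux fix converts three bare error returns into a goto that unwinds runtime PM, which probe had already enabled before these calls. The shape of the pattern, as a hedged sketch (the PM enable calls are implied by the unwind label, not shown in this hunk):

	ret = devm_request_threaded_irq(dev, pc_ack_irq, NULL,
					bam_dmux_pc_ack_irq,
					IRQF_ONESHOT, NULL, dmux);
	if (ret)
		goto err_disable_pm;	/* was: return ret; leaked PM state */

	return 0;

err_disable_pm:
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
	return ret;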
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
index 8d864d4ed77f..79f17100f70b 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -53,6 +53,7 @@
#define RGU_RESET_DELAY_MS 10
#define PORT_RESET_DELAY_MS 2000
+#define FASTBOOT_RESET_DELAY_MS 2000
#define EX_HS_TIMEOUT_MS 5000
#define EX_HS_POLL_DELAY_MS 10
@@ -167,19 +168,52 @@ static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name)
}
kfree(buffer.pointer);
+#else
+ struct device *dev = &t7xx_dev->pdev->dev;
+ int ret;
+ ret = pci_reset_function(t7xx_dev->pdev);
+ if (ret) {
+ dev_err(dev, "Failed to reset device, error:%d\n", ret);
+ return ret;
+ }
#endif
return 0;
}
-int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev)
+static void t7xx_host_event_notify(struct t7xx_pci_dev *t7xx_dev, unsigned int event_id)
{
- return t7xx_acpi_reset(t7xx_dev, "_RST");
+ u32 value;
+
+ value = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+ value &= ~HOST_EVENT_MASK;
+ value |= FIELD_PREP(HOST_EVENT_MASK, event_id);
+ iowrite32(value, IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
}
-int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev)
+int t7xx_reset_device(struct t7xx_pci_dev *t7xx_dev, enum reset_type type)
{
- return t7xx_acpi_reset(t7xx_dev, "MRST._RST");
+ int ret = 0;
+
+ pci_save_state(t7xx_dev->pdev);
+ t7xx_pci_reprobe_early(t7xx_dev);
+ t7xx_mode_update(t7xx_dev, T7XX_RESET);
+
+ if (type == FLDR) {
+ ret = t7xx_acpi_reset(t7xx_dev, "_RST");
+ } else if (type == PLDR) {
+ ret = t7xx_acpi_reset(t7xx_dev, "MRST._RST");
+ } else if (type == FASTBOOT) {
+ t7xx_host_event_notify(t7xx_dev, FASTBOOT_DL_NOTIFY);
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
+ msleep(FASTBOOT_RESET_DELAY_MS);
+ }
+
+ pci_restore_state(t7xx_dev->pdev);
+ if (ret)
+ return ret;
+
+ return t7xx_pci_reprobe(t7xx_dev, true);
}
static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
@@ -188,16 +222,15 @@ static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
if (val & MISC_RESET_TYPE_PLDR)
- t7xx_acpi_reset(t7xx_dev, "MRST._RST");
+ t7xx_reset_device(t7xx_dev, PLDR);
else if (val & MISC_RESET_TYPE_FLDR)
- t7xx_acpi_fldr_func(t7xx_dev);
+ t7xx_reset_device(t7xx_dev, FLDR);
}
static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
{
struct t7xx_pci_dev *t7xx_dev = data;
- t7xx_mode_update(t7xx_dev, T7XX_RESET);
msleep(RGU_RESET_DELAY_MS);
t7xx_reset_device_via_pmic(t7xx_dev);
return IRQ_HANDLED;
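
The modem-ops rework folds the ACPI FLDR/PLDR wrappers and the new fastboot path into a single t7xx_reset_device() entry point that saves PCI state, runs the flavor-specific reset, restores state, and reprobes. A hedged usage sketch; the wrapper below is hypothetical:

static int modem_reset(struct t7xx_pci_dev *t7xx_dev, bool whole_platform)
{
	/* FLDR resets just the PCI function, PLDR the whole platform
	 * device; FASTBOOT instead signals FASTBOOT_DL_NOTIFY and waits
	 * for the device to drop into the download stage.
	 */
	return t7xx_reset_device(t7xx_dev, whole_platform ? PLDR : FLDR);
}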
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
index b39e945a92e0..39ed0000fbba 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
@@ -78,14 +78,19 @@ struct t7xx_modem {
spinlock_t exp_lock; /* Protects exception events */
};
+enum reset_type {
+ FLDR,
+ PLDR,
+ FASTBOOT,
+};
+
void t7xx_md_exception_handshake(struct t7xx_modem *md);
void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id);
int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev);
int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev);
void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev);
void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev);
-int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev);
-int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_reset_device(struct t7xx_pci_dev *t7xx_dev, enum reset_type type);
int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev);
#endif /* __T7XX_MODEM_OPS_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 10a8c1080b10..e556e5bd49ab 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -69,6 +69,7 @@ static ssize_t t7xx_mode_store(struct device *dev,
{
struct t7xx_pci_dev *t7xx_dev;
struct pci_dev *pdev;
+ enum t7xx_mode mode;
int index = 0;
pdev = to_pci_dev(dev);
@@ -76,12 +77,22 @@ static ssize_t t7xx_mode_store(struct device *dev,
if (!t7xx_dev)
return -ENODEV;
+ mode = READ_ONCE(t7xx_dev->mode);
+
index = sysfs_match_string(t7xx_mode_names, buf);
+ if (index == mode)
+ return -EBUSY;
+
if (index == T7XX_FASTBOOT_SWITCHING) {
+ if (mode == T7XX_FASTBOOT_DOWNLOAD)
+ return count;
+
WRITE_ONCE(t7xx_dev->mode, T7XX_FASTBOOT_SWITCHING);
+ pm_runtime_resume(dev);
+ t7xx_reset_device(t7xx_dev, FASTBOOT);
} else if (index == T7XX_RESET) {
- WRITE_ONCE(t7xx_dev->mode, T7XX_RESET);
- t7xx_acpi_pldr_func(t7xx_dev);
+ pm_runtime_resume(dev);
+ t7xx_reset_device(t7xx_dev, PLDR);
}
return count;
@@ -446,7 +457,7 @@ static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3)
if (is_d3) {
t7xx_mhccif_init(t7xx_dev);
- return t7xx_pci_pm_reinit(t7xx_dev);
+ t7xx_pci_pm_reinit(t7xx_dev);
}
return 0;
@@ -481,6 +492,33 @@ static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event)
return ret;
}
+int t7xx_pci_reprobe_early(struct t7xx_pci_dev *t7xx_dev)
+{
+ enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
+ int ret;
+
+ if (mode == T7XX_FASTBOOT_DOWNLOAD)
+ pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
+
+ ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int t7xx_pci_reprobe(struct t7xx_pci_dev *t7xx_dev, bool boot)
+{
+ int ret;
+
+ ret = t7xx_pcie_reinit(t7xx_dev, boot);
+ if (ret)
+ return ret;
+
+ t7xx_clear_rgu_irq(t7xx_dev);
+ return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
+}
+
static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
{
struct t7xx_pci_dev *t7xx_dev;
@@ -507,16 +545,11 @@ static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
if (prev_state == PM_RESUME_REG_STATE_L3 ||
(prev_state == PM_RESUME_REG_STATE_INIT &&
atr_reg_val == ATR_SRC_ADDR_INVALID)) {
- ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
- if (ret)
- return ret;
-
- ret = t7xx_pcie_reinit(t7xx_dev, true);
+ ret = t7xx_pci_reprobe_early(t7xx_dev);
if (ret)
return ret;
- t7xx_clear_rgu_irq(t7xx_dev);
- return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
+ return t7xx_pci_reprobe(t7xx_dev, true);
}
if (prev_state == PM_RESUME_REG_STATE_EXP ||
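
The resume rework names the two halves of the cold-start sequence so that t7xx_reset_device() can reuse them. A hedged sketch of how they compose; the wrapper is hypothetical:

static int cold_resume(struct t7xx_pci_dev *t7xx_dev)
{
	int ret;

	/* Stop the FSM and drop the runtime-PM reference taken for
	 * fastboot download mode, if any.
	 */
	ret = t7xx_pci_reprobe_early(t7xx_dev);
	if (ret)
		return ret;

	/* Reinitialize PCIe/MHCCIF, clear the RGU interrupt, restart. */
	return t7xx_pci_reprobe(t7xx_dev, true);
}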
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h
index 49a11586d8d8..cd8ea17c2644 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.h
+++ b/drivers/net/wwan/t7xx/t7xx_pci.h
@@ -133,4 +133,7 @@ int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_en
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev);
void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev);
void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode);
+int t7xx_pci_reprobe(struct t7xx_pci_dev *t7xx_dev, bool boot);
+int t7xx_pci_reprobe_early(struct t7xx_pci_dev *t7xx_dev);
+
#endif /* __T7XX_PCI_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
index 7d6388bf1d7c..35743e7de0c3 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -553,7 +553,6 @@ static int t7xx_proxy_alloc(struct t7xx_modem *md)
md->port_prox = port_prox;
port_prox->dev = dev;
- t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);
return 0;
}
diff --git a/drivers/net/wwan/t7xx/t7xx_port_trace.c b/drivers/net/wwan/t7xx/t7xx_port_trace.c
index 6a3f36385865..4ed8b4e29bf1 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_trace.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_trace.c
@@ -59,6 +59,7 @@ static void t7xx_trace_port_uninit(struct t7xx_port *port)
relay_close(relaych);
debugfs_remove_recursive(debugfs_dir);
+ port->log.relaych = NULL;
}
static int t7xx_trace_port_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
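
Clearing port->log.relaych after relay_close() matters because later users test the pointer before touching the channel; a stale pointer would be a use-after-free. A hedged sketch of the consumer side; the helper name is illustrative:

static int trace_try_write(struct t7xx_port *port, const void *data,
			   size_t len)
{
	if (!port->log.relaych)		/* channel already torn down */
		return -ENODEV;

	relay_write(port->log.relaych, data, len);
	return 0;
}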
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
index 9889ca4621cf..3931c7a13f5a 100644
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
@@ -213,16 +213,6 @@ static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comm
fsm_finish_command(ctl, cmd, 0);
}
-static void t7xx_host_event_notify(struct t7xx_modem *md, unsigned int event_id)
-{
- u32 value;
-
- value = ioread32(IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
- value &= ~HOST_EVENT_MASK;
- value |= FIELD_PREP(HOST_EVENT_MASK, event_id);
- iowrite32(value, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
-}
-
static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int status)
{
struct t7xx_modem *md = ctl->md;
@@ -264,8 +254,14 @@ static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int
static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
{
+ enum t7xx_mode mode;
+
ctl->curr_state = FSM_STATE_STOPPED;
+ mode = READ_ONCE(ctl->md->t7xx_dev->mode);
+ if (mode == T7XX_FASTBOOT_DOWNLOAD || mode == T7XX_FASTBOOT_DUMP)
+ return 0;
+
t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED);
return t7xx_md_reset(ctl->md->t7xx_dev);
}
@@ -284,8 +280,6 @@ static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comma
{
struct cldma_ctrl *md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
struct t7xx_pci_dev *t7xx_dev = ctl->md->t7xx_dev;
- enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
- int err;
if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
fsm_finish_command(ctl, cmd, -EINVAL);
@@ -296,21 +290,10 @@ static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comma
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
t7xx_cldma_stop(md_ctrl);
- if (mode == T7XX_FASTBOOT_SWITCHING)
- t7xx_host_event_notify(ctl->md, FASTBOOT_DL_NOTIFY);
-
t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
/* Wait for the DRM disable to take effect */
msleep(FSM_DRM_DISABLE_DELAY_MS);
- if (mode == T7XX_FASTBOOT_SWITCHING) {
- t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
- } else {
- err = t7xx_acpi_fldr_func(t7xx_dev);
- if (err)
- t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
- }
-
fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}
@@ -414,7 +397,9 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command
case T7XX_DEV_STAGE_LK:
dev_dbg(dev, "LK_STAGE Entered\n");
+ t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);
t7xx_lk_stage_event_handling(ctl, status);
+
break;
case T7XX_DEV_STAGE_LINUX:
@@ -436,6 +421,9 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command
}
finish_command:
+ if (ret)
+ t7xx_mode_update(md->t7xx_dev, T7XX_UNKNOWN);
+
fsm_finish_command(ctl, cmd, ret);
}
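
With fastboot handled in t7xx_reset_device(), the FSM keeps only two related guards: the stopped handler returns early while fastboot owns the device, and a failed start drops the advertised mode back to T7XX_UNKNOWN, since t7xx_mode_store() now rejects a write matching the current mode with -EBUSY. A condensed sketch of that fallback; the function name is illustrative:

static void start_finish(struct t7xx_fsm_ctl *ctl,
			 struct t7xx_fsm_command *cmd, int ret)
{
	/* Without this, a failed start would leave the stale mode in
	 * place and the same mode could never be requested again.
	 */
	if (ret)
		t7xx_mode_update(ctl->md->t7xx_dev, T7XX_UNKNOWN);

	fsm_finish_command(ctl, cmd, ret);
}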
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index ff96f22648ef..45ddce35f6d2 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -95,7 +95,7 @@ static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
static void xenvif_flush_hash(struct xenvif *vif)
{
- struct xenvif_hash_cache_entry *entry;
+ struct xenvif_hash_cache_entry *entry, *n;
unsigned long flags;
if (xenvif_hash_cache_size == 0)
@@ -103,8 +103,7 @@ static void xenvif_flush_hash(struct xenvif *vif)
spin_lock_irqsave(&vif->hash.cache.lock, flags);
- list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
- lockdep_is_held(&vif->hash.cache.lock)) {
+ list_for_each_entry_safe(entry, n, &vif->hash.cache.list, link) {
list_del_rcu(&entry->link);
vif->hash.cache.count--;
kfree_rcu(entry, rcu);
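
The xen-netback fix swaps the RCU list iterator for the _safe variant: this walker deletes and frees entries as it goes, and it runs under the cache spinlock rather than rcu_read_lock(), so nothing guarantees the current entry survives long enough to read its next pointer. list_for_each_entry_safe() caches that pointer up front. A self-contained sketch of the rule, with illustrative types:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct cache_entry {
	struct list_head link;
	struct rcu_head rcu;
};

static void flush_all(struct list_head *head, spinlock_t *lock)
{
	struct cache_entry *entry, *n;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(entry, n, head, link) {
		list_del_rcu(&entry->link);	/* readers may still see it */
		kfree_rcu(entry, rcu);		/* freed after a grace period */
	}
	spin_unlock_irqrestore(lock, flags);
}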